Example #1
    def test_update_elb_all_defaults(self, mock_config, **kwargs):
        """
        update_elb calls get_or_create_elb with default port and protocol values if all are missing
        """
        aws = DiscoAWS(config=self._get_elb_config(), environment_name=TEST_ENV_NAME, elb=MagicMock())
        aws.elb.get_or_create_elb = MagicMock(return_value=MagicMock())
        aws.get_meta_network_by_name = _get_meta_network_mock()
        aws.elb.delete_elb = MagicMock()

        aws.update_elb("mhcelb", update_autoscaling=False)

        aws.elb.delete_elb.assert_not_called()
        aws.elb.get_or_create_elb.assert_called_once_with(
            'mhcelb',
            health_check_url='/foo',
            hosted_zone_name='example.com',
            port_config=DiscoELBPortConfig(
                [
                    DiscoELBPortMapping(80, 'HTTP', 80, 'HTTP'),
                ]
            ),
            security_groups=['sg-1234abcd'], elb_public=False,
            sticky_app_cookie=None, subnets=['s-1234abcd', 's-1234abcd', 's-1234abcd'],
            elb_dns_alias=None,
            connection_draining_timeout=300, idle_timeout=300, testing=False,
            tags={
                'environment': 'unittestenv',
                'hostclass': 'mhcelb',
                'is_testing': '0',
                'productline': 'mock_productline'
            },
            cross_zone_load_balancing=True,
            cert_name=None
        )
Example #2
 def test_create_scaling_schedule_no_sched(self, mock_config, **kwargs):
     """test create_scaling_schedule with only desired schedule"""
     aws = DiscoAWS(config=mock_config, environment_name=TEST_ENV_NAME, discogroup=MagicMock())
     aws.create_scaling_schedule("1", "2", "5", hostclass="mhcboo")
     aws.discogroup.assert_has_calls([
         call.delete_all_recurring_group_actions(hostclass='mhcboo', group_name=None)
     ])
Example #3
    def test_provision_hc_with_chaos_using_config(self, mock_config, **kwargs):
        """
        Provision creates the proper launch configuration and autoscaling group with chaos from config
        """
        config_dict = get_default_config_dict()
        config_dict["mhcunittest"]["chaos"] = "True"
        aws = DiscoAWS(config=get_mock_config(config_dict), environment_name=TEST_ENV_NAME,
                       log_metrics=MagicMock())
        mock_ami = self._get_image_mock(aws)
        aws.update_elb = MagicMock(return_value=None)
        aws.discogroup.elastigroup.spotinst_client = MagicMock()

        with patch("disco_aws_automation.DiscoAWS.get_meta_network", return_value=_get_meta_network_mock()):
            with patch("boto.ec2.connection.EC2Connection.get_all_snapshots", return_value=[]):
                with patch("disco_aws_automation.DiscoAWS.create_scaling_schedule", return_value=None):
                    with patch("boto.ec2.autoscale.AutoScaleConnection.create_or_update_tags",
                               return_value=None):
                        metadata = aws.provision(ami=mock_ami, hostclass="mhcunittest",
                                                 owner="unittestuser",
                                                 min_size=1, desired_size=1, max_size=1)

        self.assertEqual(metadata["hostclass"], "mhcunittest")
        self.assertFalse(metadata["no_destroy"])
        self.assertTrue(metadata["chaos"])
        _lc = aws.discogroup.get_configs()[0]
        self.assertRegexpMatches(_lc.name, r".*_mhcunittest_[0-9]*")
        self.assertEqual(_lc.image_id, mock_ami.id)
        self.assertTrue(aws.discogroup.get_existing_group(hostclass="mhcunittest"))
        _ag = aws.discogroup.get_existing_groups()[0]
        self.assertRegexpMatches(_ag['name'], r"unittestenv_mhcunittest_[0-9]*")
        self.assertEqual(_ag['min_size'], 1)
        self.assertEqual(_ag['max_size'], 1)
        self.assertEqual(_ag['desired_capacity'], 1)
Example #4
 def test_wait_for_autoscaling_using_amiid(self, mock_config, **kwargs):
     '''test wait for autoscaling using the ami id to identify the instances'''
     aws = DiscoAWS(config=mock_config, environment_name=TEST_ENV_NAME)
     instances = [{"InstanceId": "i-123123aa"}]
     aws.instances_from_amis = MagicMock(return_value=instances)
     aws.wait_for_autoscaling('ami-12345678', 1)
     aws.instances_from_amis.assert_called_with(['ami-12345678'], group_name=None, launch_time=None)
Example #5
    def test_provision_hostclass_sched_some_none(self, mock_config, **kwargs):
        """
        Provision creates the proper autoscaling group sizes with scheduled sizes
        """
        aws = DiscoAWS(config=mock_config, environment_name=TEST_ENV_NAME, log_metrics=MagicMock())
        aws.update_elb = MagicMock(return_value=None)
        aws.discogroup.elastigroup.spotinst_client = MagicMock()
        aws.vpc.environment_class = None

        with patch("disco_aws_automation.DiscoAWS.get_meta_network", return_value=_get_meta_network_mock()):
            with patch("boto.ec2.connection.EC2Connection.get_all_snapshots", return_value=[]):
                with patch("disco_aws_automation.DiscoAWS.create_scaling_schedule", return_value=None):
                    with patch("boto.ec2.autoscale.AutoScaleConnection.create_or_update_tags",
                               return_value=None):
                        with patch("disco_aws_automation.DiscoELB.get_or_create_target_group",
                                   return_value="foobar"):
                            with patch("disco_aws_automation.DiscoAutoscale.update_tg",
                                       return_value=None):
                                aws.provision(ami=self._get_image_mock(aws),
                                              hostclass="mhcunittest", owner="unittestuser",
                                              min_size="",
                                              desired_size="2@1 0 * * *:3@6 0 * * *", max_size="")

        _ag = aws.discogroup.get_existing_groups()[0]
        print("({0}, {1}, {2})".format(_ag['min_size'], _ag['desired_capacity'], _ag['max_size']))
        self.assertEqual(_ag['min_size'], 0)  # minimum of listed sizes
        self.assertEqual(_ag['desired_capacity'], 3)  # maximum of listed sizes
        self.assertEqual(_ag['max_size'], 3)  # maximum of listed sizes
Example #6
    def test_provision_hostclass_simple(self, mock_config, **kwargs):
        """
        Provision creates the proper launch configuration and autoscaling group
        """
        aws = DiscoAWS(config=mock_config, environment_name=TEST_ENV_NAME, log_metrics=MagicMock())
        mock_ami = self._get_image_mock(aws)
        aws.update_elb = MagicMock(return_value=None)
        aws.discogroup.elastigroup.spotinst_client = MagicMock()
        aws.vpc.environment_class = None

        with patch("disco_aws_automation.DiscoAWS.get_meta_network", return_value=_get_meta_network_mock()):
            with patch("boto.ec2.connection.EC2Connection.get_all_snapshots", return_value=[]):
                with patch("disco_aws_automation.DiscoAWS.create_scaling_schedule", return_value=None):
                    with patch("boto.ec2.autoscale.AutoScaleConnection.create_or_update_tags",
                               return_value=None):
                        with patch("disco_aws_automation.DiscoELB.get_or_create_target_group",
                                   return_value="foobar"):
                            with patch("disco_aws_automation.DiscoAutoscale.update_tg",
                                       return_value=None):
                                metadata = aws.provision(ami=mock_ami, hostclass="mhcunittest",
                                                         owner="unittestuser",
                                                         min_size=1, desired_size=1, max_size=1)

        self.assertEqual(metadata["hostclass"], "mhcunittest")
        self.assertFalse(metadata["no_destroy"])
        self.assertTrue(metadata["chaos"])
        _lc = aws.discogroup.get_configs()[0]
        self.assertRegexpMatches(_lc.name, r".*_mhcunittest_[0-9]*")
        self.assertEqual(_lc.image_id, mock_ami.id)
        self.assertTrue(aws.discogroup.get_existing_group(hostclass="mhcunittest"))
        _ag = aws.discogroup.get_existing_groups()[0]
        self.assertRegexpMatches(_ag['name'], r"unittestenv_mhcunittest_[0-9]*")
        self.assertEqual(_ag['min_size'], 1)
        self.assertEqual(_ag['max_size'], 1)
        self.assertEqual(_ag['desired_capacity'], 1)
Example #7
 def test_update_elb_delete(self, mock_config, **kwargs):
     '''Update ELB deletes ELBs that are no longer configured'''
     aws = DiscoAWS(config=mock_config, environment_name=TEST_ENV_NAME, elb=MagicMock())
     aws.elb.get_elb = MagicMock(return_value=True)
     aws.elb.delete_elb = MagicMock()
     aws.update_elb("mhcfoo", update_autoscaling=False)
     aws.elb.delete_elb.assert_called_once_with("mhcfoo")
Example #8
    def test_provision_hc_simple_with_no_chaos(self, mock_config, **kwargs):
        """
        Provision creates the proper launch configuration and autoscaling group with no chaos
        """
        aws = DiscoAWS(config=mock_config, environment_name=TEST_ENV_NAME)
        mock_ami = self._get_image_mock(aws)
        aws.log_metrics = MagicMock()
        aws.update_elb = MagicMock(return_value=None)

        with patch("disco_aws_automation.DiscoAWS.get_meta_network", return_value=_get_meta_network_mock()):
            with patch("boto.ec2.connection.EC2Connection.get_all_snapshots", return_value=[]):
                with patch("disco_aws_automation.DiscoAWS.create_scaling_schedule", return_value=None):
                    with patch("boto.ec2.autoscale.AutoScaleConnection.create_or_update_tags",
                               return_value=None):
                        metadata = aws.provision(ami=mock_ami, hostclass="mhcunittest",
                                                 owner="unittestuser",
                                                 min_size=1, desired_size=1, max_size=1,
                                                 chaos="False")

        self.assertEqual(metadata["hostclass"], "mhcunittest")
        self.assertFalse(metadata["no_destroy"])
        self.assertFalse(metadata["chaos"])
        _lc = aws.autoscale.get_configs()[0]
        self.assertRegexpMatches(_lc.name, r".*_mhcunittest_[0-9]*")
        self.assertEqual(_lc.image_id, mock_ami.id)
        self.assertTrue(aws.autoscale.has_group("mhcunittest"))
        _ag = aws.autoscale.get_groups()[0]
        self.assertEqual(_ag.name, "unittestenv_mhcunittest")
        self.assertEqual(_ag.min_size, 1)
        self.assertEqual(_ag.max_size, 1)
        self.assertEqual(_ag.desired_capacity, 1)
Example #9
 def test_create_scaling_schedule_no_sched(self, mock_config, **kwargs):
     """test create_scaling_schedule with only desired schedule"""
     aws = DiscoAWS(config=mock_config, environment_name=TEST_ENV_NAME)
     aws.autoscale = MagicMock()
     aws.create_scaling_schedule("mhcboo", "1", "2", "5")
     aws.autoscale.assert_has_calls(
         [call.delete_all_recurring_group_actions('mhcboo')])
Example #10
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    parser = get_parser()
    args = parser.parse_args()
    configure_logging(args.debug)

    environment_name = args.env or config.get("disco_aws", "default_environment")

    aws = DiscoAWS(config, environment_name=environment_name)
    if args.mode == "create":
        aws.disco_storage.create_ebs_snapshot(args.hostclass, args.size)
    elif args.mode == "list":
        for snapshot in aws.disco_storage.get_snapshots(args.hostclasses):
            print("{0:26} {1:13} {2:9} {3} {4:4}".format(
                snapshot.tags['hostclass'], snapshot.id, snapshot.status,
                snapshot.start_time, snapshot.volume_size))
    elif args.mode == "cleanup":
        aws.disco_storage.cleanup_ebs_snapshots(args.keep)
    elif args.mode == "capture":
        instances = instances_from_args(aws, args)
        if not instances:
            logging.warning("No instances found")
        for instance in instances:
            return_code, output = aws.remotecmd(
                instance, ["sudo /opt/wgen/bin/take_snapshot.sh"], user="******")
            if return_code:
                raise Exception("Failed to snapshot instance {0}:\n {1}\n".format(instance, output))
            logging.info("Successfully snapshotted %s", instance)
    elif args.mode == "delete":
        for snapshot_id in args.snapshots:
            aws.disco_storage.delete_snapshot(snapshot_id)
    elif args.mode == "update":
        snapshot = aws.disco_storage.get_latest_snapshot(args.hostclass)
        aws.autoscale.update_snapshot(args.hostclass, snapshot.id, snapshot.volume_size)
Example #11
    def test_update_elb_create(self, mock_config, **kwargs):
        '''DiscoELB called to update or create ELB when one is configured'''
        aws = DiscoAWS(config=self._get_elb_config(),
                       environment_name=TEST_ENV_NAME)
        aws.elb.get_or_create_elb = MagicMock(return_value=MagicMock())
        aws.get_meta_network_by_name = _get_meta_network_mock()
        aws.elb.delete_elb = MagicMock()

        aws.update_elb("mhcelb", update_autoscaling=False)

        aws.elb.delete_elb.assert_not_called()
        aws.elb.get_or_create_elb.assert_called_once_with(
            'mhcelb',
            elb_port=80,
            health_check_url='/foo',
            hosted_zone_name='example.com',
            instance_port=80,
            elb_protocol='HTTP',
            instance_protocol='HTTP',
            security_groups=['sg-1234abcd'],
            elb_public=False,
            sticky_app_cookie=None,
            subnets=['s-1234abcd', 's-1234abcd', 's-1234abcd'],
            connection_draining_timeout=300,
            idle_timeout=300)
Example #12
 def test_create_scaling_schedule_mixed(self, mock_config, **kwargs):
     """test create_scaling_schedule with only desired schedule"""
     aws = DiscoAWS(config=mock_config, environment_name=TEST_ENV_NAME)
     aws.autoscale = MagicMock()
     aws.create_scaling_schedule("mhcboo", "1@1 0 * * *:2@7 0 * * *",
                                 "2@1 0 * * *:3@6 0 * * *",
                                 "6@2 0 * * *:9@6 0 * * *")
     aws.autoscale.assert_has_calls([
         call.delete_all_recurring_group_actions('mhcboo'),
         call.create_recurring_group_action('mhcboo',
                                            '1 0 * * *',
                                            min_size=1,
                                            desired_capacity=2,
                                            max_size=None),
         call.create_recurring_group_action('mhcboo',
                                            '2 0 * * *',
                                            min_size=None,
                                            desired_capacity=None,
                                            max_size=6),
         call.create_recurring_group_action('mhcboo',
                                            '6 0 * * *',
                                            min_size=None,
                                            desired_capacity=3,
                                            max_size=9),
         call.create_recurring_group_action('mhcboo',
                                            '7 0 * * *',
                                            min_size=2,
                                            desired_capacity=None,
                                            max_size=None)
     ],
                                    any_order=True)
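
The assertions above follow from merging the three "count@cron" schedules key by key: for each cron expression, whichever of the min/desired/max schedules define a size at that time contribute it, and the others stay None. Below is a minimal sketch of that merge, assuming plain dict inputs; it is illustrative only, not the library's create_scaling_schedule implementation.

def merge_schedules(min_map, desired_map, max_map):
    """Combine per-cron size maps into one recurring-action spec per cron expression."""
    crons = set(min_map) | set(desired_map) | set(max_map)
    return {
        cron: {
            "min_size": min_map.get(cron),
            "desired_capacity": desired_map.get(cron),
            "max_size": max_map.get(cron),
        }
        for cron in crons
    }

# Mirrors the mixed case asserted above.
merged = merge_schedules({"1 0 * * *": 1, "7 0 * * *": 2},
                         {"1 0 * * *": 2, "6 0 * * *": 3},
                         {"2 0 * * *": 6, "6 0 * * *": 9})
assert merged["1 0 * * *"] == {"min_size": 1, "desired_capacity": 2, "max_size": None}
assert merged["2 0 * * *"] == {"min_size": None, "desired_capacity": None, "max_size": 6}
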
Example #13
    def test_create_userdata_without_spotinst(self, **kwargs):
        """
        create_userdata doesn't set 'spotinst' key
        """
        config_dict = get_default_config_dict()
        aws = DiscoAWS(config=get_mock_config(config_dict), environment_name=TEST_ENV_NAME)

        user_data = aws.create_userdata(hostclass="mhcunittest", owner="unittestuser", is_spotinst=False)
        self.assertEqual(user_data["is_spotinst"], "0")
Example #14
 def test_instances_from_amis(self, mock_config, **kwargs):
     '''test get instances using ami ids '''
     aws = DiscoAWS(config=mock_config, environment_name=TEST_ENV_NAME)
     instance = create_autospec(boto.ec2.instance.Instance)
     instance.id = "i-123123aa"
     instances = [instance]
     aws.instances = MagicMock(return_value=instances)
     self.assertEqual(aws.instances_from_amis('ami-12345678'), instances)
     aws.instances.assert_called_with(filters={"image_id": 'ami-12345678'}, instance_ids=None)
Example #15
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    parser = get_parser()
    args = parser.parse_args()
    configure_logging(args.debug)

    environment_name = args.env or config.get("disco_aws",
                                              "default_environment")

    aws = DiscoAWS(config, environment_name=environment_name)
    if args.mode == "create":
        product_line = aws.hostclass_option_default(args.hostclass,
                                                    'product_line', 'unknown')
        aws.disco_storage.create_ebs_snapshot(args.hostclass, args.size,
                                              product_line,
                                              not args.unencrypted)
    elif args.mode == "list":
        for snapshot in aws.disco_storage.get_snapshots(args.hostclasses):
            print("{0:26} {1:13} {2:9} {3} {4:4}".format(
                snapshot.tags['hostclass'], snapshot.id, snapshot.status,
                snapshot.start_time, snapshot.volume_size))
    elif args.mode == "cleanup":
        aws.disco_storage.cleanup_ebs_snapshots(args.keep)
    elif args.mode == "capture":
        if args.volume_id:
            extra_snapshot_tags = None
            if args.tags:
                extra_snapshot_tags = dict(
                    tag_item.split(':') for tag_item in args.tags)
            snapshot_id = aws.disco_storage.take_snapshot(
                args.volume_id, snapshot_tags=extra_snapshot_tags)
            print("Successfully created snapshot: {0}".format(snapshot_id))
        else:
            instances = instances_from_args(aws, args)
            if not instances:
                print("No instances found")
            for instance in instances:
                return_code, output = aws.remotecmd(
                    instance, ["sudo /opt/wgen/bin/take_snapshot.sh"],
                    user="******")
                if return_code:
                    raise Exception(
                        "Failed to snapshot instance {0}:\n {1}\n".format(
                            instance, output))
                print("Successfully snapshotted {0}".format(instance))
    elif args.mode == "delete":
        for snapshot_id in args.snapshots:
            aws.disco_storage.delete_snapshot(snapshot_id)
    elif args.mode == "update":
        if args.snapshot_id:
            snapshot = aws.disco_storage.get_snapshot_from_id(args.snapshot_id)
        else:
            snapshot = aws.disco_storage.get_latest_snapshot(args.hostclass)
        aws.discogroup.update_snapshot(snapshot.id,
                                       snapshot.volume_size,
                                       hostclass=args.hostclass)
Example #16
    def test_create_userdata_with_zookeeper(self, **kwargs):
        """
        create_userdata sets 'zookeepers' key
        """
        config_dict = get_default_config_dict()
        aws = DiscoAWS(config=get_mock_config(config_dict), environment_name=TEST_ENV_NAME)

        user_data = aws.create_userdata(hostclass="mhcunittest", owner="unittestuser")
        self.assertEqual(user_data["zookeepers"], "[\\\"mhczookeeper-{}.example.com:2181\\\"]".format(
            aws.vpc.environment_name))
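
The doubly escaped value in the assertion above is a JSON-style list of zookeeper endpoints whose quotes are backslash-escaped so they survive later interpolation of the user data. The following sketch shows just the escaping; the variable names and the "unittestenv" environment name are assumptions for illustration, not DiscoAWS.create_userdata itself.

env_name = "unittestenv"  # assumed value of aws.vpc.environment_name in the test above
zk_host = "mhczookeeper-{0}.example.com:2181".format(env_name)
zookeepers = "[\\\"{0}\\\"]".format(zk_host)  # yields [\"mhczookeeper-unittestenv.example.com:2181\"]
assert zookeepers == "[\\\"mhczookeeper-{0}.example.com:2181\\\"]".format(env_name)
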
Example #17
 def test_instances_from_amis_with_group_name(self, mock_config, **kwargs):
     '''test get instances using ami ids in a specified group name'''
     aws = DiscoAWS(config=mock_config, environment_name=TEST_ENV_NAME)
     instance = create_autospec(boto.ec2.instance.Instance)
     instance.id = "i-123123aa"
     instances = [instance]
     aws.instances_from_asgs = MagicMock(return_value=instances)
     aws.instances = MagicMock(return_value=instances)
     self.assertEqual(aws.instances_from_amis('ami-12345678', group_name='test_group'), instances)
     aws.instances_from_asgs.assert_called_with(['test_group'])
Example #18
    def test_create_userdata_with_eip(self, **kwargs):
        """
        create_userdata sets 'eip' key when an EIP is required
        """
        config_dict = get_default_config_dict()
        eip = "54.201.250.76"
        config_dict["mhcunittest"]["eip"] = eip
        aws = DiscoAWS(config=get_mock_config(config_dict), environment_name=TEST_ENV_NAME)

        user_data = aws.create_userdata(hostclass="mhcunittest", owner="unittestuser")
        self.assertEqual(user_data["eip"], eip)
Example #19
 def test_create_scaling_schedule_only_desired(self, mock_config, **kwargs):
     """test create_scaling_schedule with only desired schedule"""
     aws = DiscoAWS(config=mock_config, environment_name=TEST_ENV_NAME, discogroup=MagicMock())
     aws.create_scaling_schedule("1", "2@1 0 * * *:3@6 0 * * *", "5", hostclass="mhcboo")
     aws.discogroup.assert_has_calls([
         call.delete_all_recurring_group_actions(hostclass='mhcboo', group_name=None),
         call.create_recurring_group_action('1 0 * * *', hostclass='mhcboo', group_name=None,
                                            min_size=None, desired_capacity=2, max_size=None),
         call.create_recurring_group_action('6 0 * * *', hostclass='mhcboo', group_name=None,
                                            min_size=None, desired_capacity=3, max_size=None)
     ], any_order=True)
Example #20
 def test_create_scaling_schedule_overlapping(self, mock_config, **kwargs):
     """test create_scaling_schedule with only desired schedule"""
     aws = DiscoAWS(config=mock_config, environment_name=TEST_ENV_NAME)
     aws.autoscale = MagicMock()
     aws.create_scaling_schedule("mhcboo",
                                 "1@1 0 * * *:2@6 0 * * *",
                                 "2@1 0 * * *:3@6 0 * * *",
                                 "6@1 0 * * *:9@6 0 * * *")
     aws.autoscale.assert_has_calls([
         call.delete_all_recurring_group_actions('mhcboo'),
         call.create_recurring_group_action('mhcboo', '1 0 * * *',
                                            min_size=1, desired_capacity=2, max_size=6),
         call.create_recurring_group_action('mhcboo', '6 0 * * *',
                                            min_size=2, desired_capacity=3, max_size=9)
     ], any_order=True)
Example #21
def run():
    """Parses command line and dispatches the commands"""
    args = docopt(__doc__)

    configure_logging(args["--debug"])

    config = read_config()

    dry_run = args.get("--dry-run")
    delete = args.get("--delete")
    hostclass = args.get("--hostclass")
    env = args.get("--env") or config.get("disco_aws", "default_environment")
    alarms_config = DiscoAlarmsConfig(env)

    if args["update_notifications"]:
        notifications = alarms_config.get_notifications()
        DiscoSNS().update_sns_with_notifications(notifications,
                                                 env,
                                                 delete=delete,
                                                 dry_run=dry_run)
    elif args["update_metrics"]:
        if delete:
            DiscoAlarm().delete_hostclass_environment_alarms(env, hostclass)
        DiscoAWS(config, env).spinup_alarms([hostclass])
    elif args["list"]:
        alarms = DiscoAlarm().get_alarms({
            "env": env,
            "hostclass": hostclass
        } if hostclass else {"env": env})
        for alarm in alarms:
            print(alarm)
    else:
        logging.error("No command specified. See --help")
        sys.exit(1)
Example #22
def run():
    """Parses command line and dispatches the commands"""
    args = docopt(__doc__)

    configure_logging(args["--debug"])

    config = read_config()

    env = args.get("--env") or config.get("disco_aws", "default_environment")
    vpc = DiscoVPC.fetch_environment(environment_name=env)
    if not vpc:
        print("Environment does not exist: {}".format(env))
        sys.exit(1)

    if args['list']:
        format_string = "{0:<50} {1:33} {2}"
        print(format_string.format("ELB Name", "Availability Zones", "ELB Id"),
              file=sys.stderr)
        for elb_info in sorted(DiscoELB(vpc).list_for_display()):
            print(
                format_string.format(elb_info['elb_name'],
                                     elb_info['availability_zones'],
                                     elb_info["elb_id"]))
    elif args['update']:
        DiscoAWS(config, env).update_elb(args['--hostclass'])
Example #23
def run():
    """Parses command line and dispatches the commands"""
    args = docopt(__doc__)

    configure_logging(args["--debug"])

    config = read_config()

    env = args.get("--env") or config.get("disco_aws", "default_environment")
    vpc = DiscoVPC.fetch_environment(environment_name=env)
    if not vpc:
        print("Environment does not exist: {}".format(env))
        sys.exit(1)

    aws = DiscoAWS(config, env)
    disco_elasticache = DiscoElastiCache(vpc, aws=aws)

    if args['list']:
        for cluster in disco_elasticache.list():
            size = 'N/A'
            if cluster['Status'] == 'available':
                size = len(cluster['NodeGroups'][0]['NodeGroupMembers'])
            print("{0:<25} {1:5} {2:>5}".format(cluster['Description'],
                                                cluster['Status'], size))
    elif args['update']:
        if args['--cluster']:
            disco_elasticache.update(args['--cluster'])
        else:
            disco_elasticache.update_all()

    elif args['delete']:
        disco_elasticache.delete(args['--cluster'], wait=args['--wait'])
Example #24
 def test_smoketest_once_is_terminated(self, mock_config, **kwargs):
     '''smoketest_once raises SmokeTestError if instance has terminated'''
     aws = DiscoAWS(config=mock_config, environment_name=TEST_ENV_NAME)
     with patch("disco_aws_automation.DiscoAWS.is_terminal_state",
                return_value=True):
         self.assertRaises(SmokeTestError, aws.smoketest_once,
                           self.instance)
Example #25
    def test_provision_hostclass_sched_all_none(self, mock_config, **kwargs):
        """
        Provision creates the proper autoscaling group sizes with scheduled sizes
        """
        aws = DiscoAWS(config=mock_config, environment_name=TEST_ENV_NAME)
        aws.log_metrics = MagicMock()
        aws.update_elb = MagicMock(return_value=None)

        with patch("disco_aws_automation.DiscoAWS.get_meta_network", return_value=_get_meta_network_mock()):
            with patch("boto.ec2.connection.EC2Connection.get_all_snapshots", return_value=[]):
                with patch("disco_aws_automation.DiscoAWS.create_scaling_schedule", return_value=None):
                    with patch("boto.ec2.autoscale.AutoScaleConnection.create_or_update_tags",
                               return_value=None):
                        aws.provision(ami=self._get_image_mock(aws),
                                      hostclass="mhcunittest", owner="unittestuser",
                                      min_size="", desired_size="", max_size="")

        _ag0 = aws.autoscale.get_groups()[0]

        self.assertEqual(_ag0.min_size, 0)  # minimum of listed sizes
        self.assertEqual(_ag0.desired_capacity, 0)  # maximum of listed sizes
        self.assertEqual(_ag0.max_size, 0)  # maximum of listed sizes

        with patch("disco_aws_automation.DiscoAWS.get_meta_network", return_value=_get_meta_network_mock()):
            with patch("boto.ec2.connection.EC2Connection.get_all_snapshots", return_value=[]):
                with patch("disco_aws_automation.DiscoAWS.create_scaling_schedule", return_value=None):
                    with patch("boto.ec2.autoscale.AutoScaleConnection.create_or_update_tags",
                               return_value=None):
                        aws.provision(ami=self._get_image_mock(aws),
                                      hostclass="mhcunittest", owner="unittestuser",
                                      min_size="3", desired_size="6", max_size="9")

        _ag1 = aws.autoscale.get_groups()[0]

        self.assertEqual(_ag1.min_size, 3)  # minimum of listed sizes
        self.assertEqual(_ag1.desired_capacity, 6)  # maximum of listed sizes
        self.assertEqual(_ag1.max_size, 9)  # maximum of listed sizes

        with patch("disco_aws_automation.DiscoAWS.get_meta_network", return_value=_get_meta_network_mock()):
            with patch("boto.ec2.connection.EC2Connection.get_all_snapshots", return_value=[]):
                with patch("disco_aws_automation.DiscoAWS.create_scaling_schedule", return_value=None):
                    with patch("boto.ec2.autoscale.AutoScaleConnection.create_or_update_tags",
                               return_value=None):
                        aws.provision(ami=self._get_image_mock(aws),
                                      hostclass="mhcunittest", owner="unittestuser",
                                      min_size="", desired_size="", max_size="")

        _ag2 = aws.autoscale.get_groups()[0]

        self.assertEqual(_ag2.min_size, 3)  # minimum of listed sizes
        self.assertEqual(_ag2.desired_capacity, 6)  # maximum of listed sizes
        self.assertEqual(_ag2.max_size, 9)  # maximum of listed sizes
Example #26
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    parser = get_parser()
    args = parser.parse_args()
    configure_logging(args.debug)

    environment_name = args.env or config.get("disco_aws", "default_environment")

    aws = DiscoAWS(config, environment_name=environment_name)
    if args.mode == "create":
        product_line = aws.hostclass_option_default(args.hostclass, 'product_line', 'unknown')
        aws.disco_storage.create_ebs_snapshot(args.hostclass, args.size, product_line, not args.unencrypted)
    elif args.mode == "list":
        for snapshot in aws.disco_storage.get_snapshots(args.hostclasses):
            print("{0:26} {1:13} {2:9} {3} {4:4}".format(
                snapshot.tags['hostclass'], snapshot.id, snapshot.status,
                snapshot.start_time, snapshot.volume_size))
    elif args.mode == "cleanup":
        aws.disco_storage.cleanup_ebs_snapshots(args.keep)
    elif args.mode == "capture":
        if args.volume_id:
            extra_snapshot_tags = None
            if args.tags:
                extra_snapshot_tags = dict(tag_item.split(':') for tag_item in args.tags)
            snapshot_id = aws.disco_storage.take_snapshot(args.volume_id, snapshot_tags=extra_snapshot_tags)
            print("Successfully created snapshot: {0}".format(snapshot_id))
        else:
            instances = instances_from_args(aws, args)
            if not instances:
                print("No instances found")
            for instance in instances:
                return_code, output = aws.remotecmd(
                    instance, ["sudo /opt/wgen/bin/take_snapshot.sh"], user="******")
                if return_code:
                    raise Exception("Failed to snapshot instance {0}:\n {1}\n".format(instance, output))
                print("Successfully snapshotted {0}".format(instance))
    elif args.mode == "delete":
        for snapshot_id in args.snapshots:
            aws.disco_storage.delete_snapshot(snapshot_id)
    elif args.mode == "update":
        if args.snapshot_id:
            snapshot = aws.disco_storage.get_snapshot_from_id(args.snapshot_id)
        else:
            snapshot = aws.disco_storage.get_latest_snapshot(args.hostclass)
        aws.discogroup.update_snapshot(snapshot.id, snapshot.volume_size, hostclass=args.hostclass)
Example #27
    def test_instances_from_amis_with_launch_date(self, mock_config, **kwargs):
        '''test get instances using ami ids and with date after a specified date time'''
        aws = DiscoAWS(config=mock_config, environment_name=TEST_ENV_NAME)
        now = datetime.utcnow()

        instance1 = create_autospec(boto.ec2.instance.Instance)
        instance1.id = "i-123123aa"
        instance1.launch_time = str(now + timedelta(minutes=10))
        instance2 = create_autospec(boto.ec2.instance.Instance)
        instance2.id = "i-123123ff"
        instance2.launch_time = str(now - timedelta(days=1))
        instances = [instance1, instance2]

        aws.instances = MagicMock(return_value=instances)
        self.assertEqual(aws.instances_from_amis('ami-12345678', launch_time=now),
                         [instance1])
        aws.instances.assert_called_with(filters={"image_id": 'ami-12345678'}, instance_ids=None)
Example #28
    def test_update_elb_create(self, mock_config, **kwargs):
        '''DiscoELB called to update or create ELB when one is configured'''
        aws = DiscoAWS(config=self._get_elb_config(), environment_name=TEST_ENV_NAME)
        aws.elb.get_or_create_elb = MagicMock(return_value=MagicMock())
        aws.get_meta_network_by_name = _get_meta_network_mock()
        aws.elb.delete_elb = MagicMock()

        aws.update_elb("mhcelb", update_autoscaling=False)

        aws.elb.delete_elb.assert_not_called()
        aws.elb.get_or_create_elb.assert_called_once_with(
            'mhcelb', elb_port=80, health_check_url='/foo',
            hosted_zone_name='example.com', instance_port=80,
            elb_protocol='HTTP', instance_protocol='HTTP',
            security_groups=['sg-1234abcd'], elb_public=False,
            sticky_app_cookie=None, subnets=['s-1234abcd', 's-1234abcd', 's-1234abcd'],
            connection_draining_timeout=300, idle_timeout=300)
Example #29
    def test_update_elb_mismatch(self, mock_config, **kwargs):
        """
        update_elb sets instance=ELB when given mismatched numbers of instance and ELB ports
        """
        overrides = {
            'elb_instance_port': '80, 9001',
            'elb_instance_protocol': 'HTTP, HTTP',
            'elb_port': '443, 80, 9002',
            'elb_protocol': 'HTTPS, HTTP, HTTP'
        }
        aws = DiscoAWS(
            config=self._get_elb_config(overrides),
            environment_name=TEST_ENV_NAME,
            elb=MagicMock()
        )
        aws.elb.get_or_create_elb = MagicMock(return_value=MagicMock())
        aws.get_meta_network_by_name = _get_meta_network_mock()
        aws.elb.delete_elb = MagicMock()
        aws.update_elb("mhcelb", update_autoscaling=False)

        aws.elb.delete_elb.assert_not_called()
        aws.elb.get_or_create_elb.assert_called_once_with(
            'mhcelb',
            health_check_url='/foo',
            hosted_zone_name='example.com',
            port_config=DiscoELBPortConfig(
                [
                    DiscoELBPortMapping(80, 'HTTP', 443, 'HTTPS'),
                    DiscoELBPortMapping(9001, 'HTTP', 80, 'HTTP'),
                    DiscoELBPortMapping(9002, 'HTTP', 9002, 'HTTP')
                ]
            ),
            security_groups=['sg-1234abcd'], elb_public=False,
            sticky_app_cookie=None, subnets=['s-1234abcd', 's-1234abcd', 's-1234abcd'],
            elb_dns_alias=None,
            connection_draining_timeout=300, idle_timeout=300, testing=False,
            tags={
                'environment': 'unittestenv',
                'hostclass': 'mhcelb',
                'is_testing': '0',
                'productline': 'mock_productline'
            },
            cross_zone_load_balancing=True,
            cert_name=None
        )
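
The pairing asserted above can be read as: walk the ELB-side port and protocol lists, and wherever the instance-side lists run short, reuse the ELB-side value. Here is a minimal sketch of that rule, assuming the ELB-side lists are the longer ones; it is illustrative only, not the actual update_elb code.

def build_port_mappings(instance_ports, instance_protocols, elb_ports, elb_protocols):
    """Pair instance and ELB ports/protocols, falling back to the ELB-side values."""
    mappings = []
    for index, (elb_port, elb_protocol) in enumerate(zip(elb_ports, elb_protocols)):
        instance_port = instance_ports[index] if index < len(instance_ports) else elb_port
        instance_protocol = (instance_protocols[index]
                             if index < len(instance_protocols) else elb_protocol)
        mappings.append((instance_port, instance_protocol, elb_port, elb_protocol))
    return mappings

# Matches the DiscoELBPortMapping arguments asserted above.
assert build_port_mappings([80, 9001], ['HTTP', 'HTTP'],
                           [443, 80, 9002], ['HTTPS', 'HTTP', 'HTTP']) == [
    (80, 'HTTP', 443, 'HTTPS'),
    (9001, 'HTTP', 80, 'HTTP'),
    (9002, 'HTTP', 9002, 'HTTP'),
]
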
Example #30
 def test_smoketest_once_no_instance(self, mock_config, **kwargs):
     '''smoketest_once converts an instance-not-found error into TimeoutError'''
     aws = DiscoAWS(config=mock_config, environment_name=TEST_ENV_NAME)
     self.instance.update = MagicMock(side_effect=EC2ResponseError(
         400, "Bad Request",
         body={
             "RequestID": "df218052-63f2-4a11-820f-542d97d078bd",
             "Error": {"Code": "InvalidInstanceID.NotFound", "Message": "test"}}))
     self.assertRaises(TimeoutError, aws.smoketest_once, self.instance)
Example #31
 def test_smoketest_once_passes_exception(self, mock_config, **kwargs):
     '''smoketest_once passes random EC2ResponseErrors'''
     aws = DiscoAWS(config=mock_config, environment_name=TEST_ENV_NAME)
     self.instance.update = MagicMock(side_effect=EC2ResponseError(
         400, "Bad Request",
         body={
             "RequestID": "df218052-63f2-4a11-820f-542d97d078bd",
             "Error": {"Code": "Throttled", "Message": "test"}}))
     self.assertRaises(EC2ResponseError, aws.smoketest_once, self.instance)
Example #32
    def test_provision_hc_with_chaos_using_config(self, mock_config, **kwargs):
        """
        Provision creates the proper launch configuration and autoscaling group with chaos from config
        """
        config_dict = get_default_config_dict()
        config_dict["mhcunittest"]["chaos"] = "True"
        aws = DiscoAWS(config=get_mock_config(config_dict),
                       environment_name=TEST_ENV_NAME)
        mock_ami = self._get_image_mock(aws)
        aws.log_metrics = MagicMock()
        aws.update_elb = MagicMock(return_value=None)

        with patch("disco_aws_automation.DiscoAWS.get_meta_network",
                   return_value=_get_meta_network_mock()):
            with patch("boto.ec2.connection.EC2Connection.get_all_snapshots",
                       return_value=[]):
                with patch(
                        "disco_aws_automation.DiscoAWS.create_scaling_schedule",
                        return_value=None):
                    with patch(
                            "boto.ec2.autoscale.AutoScaleConnection.create_or_update_tags",
                            return_value=None):
                        metadata = aws.provision(ami=mock_ami,
                                                 hostclass="mhcunittest",
                                                 owner="unittestuser",
                                                 min_size=1,
                                                 desired_size=1,
                                                 max_size=1)

        self.assertEqual(metadata["hostclass"], "mhcunittest")
        self.assertFalse(metadata["no_destroy"])
        self.assertTrue(metadata["chaos"])
        _lc = aws.autoscale.get_configs()[0]
        self.assertRegexpMatches(_lc.name, r".*_mhcunittest_[0-9]*")
        self.assertEqual(_lc.image_id, mock_ami.id)
        self.assertTrue(aws.autoscale.has_group("mhcunittest"))
        _ag = aws.autoscale.get_groups()[0]
        self.assertEqual(_ag.name, "unittestenv_mhcunittest")
        self.assertEqual(_ag.min_size, 1)
        self.assertEqual(_ag.max_size, 1)
        self.assertEqual(_ag.desired_capacity, 1)
Example #33
    def test_provision_hostclass_schedules(self, mock_config, **kwargs):
        """
        Provision creates the proper autoscaling group sizes with scheduled sizes
        """
        aws = DiscoAWS(config=mock_config, environment_name=TEST_ENV_NAME)
        aws.log_metrics = MagicMock()
        aws.update_elb = MagicMock(return_value=None)

        with patch("disco_aws_automation.DiscoAWS.get_meta_network",
                   return_value=_get_meta_network_mock()):
            with patch("boto.ec2.connection.EC2Connection.get_all_snapshots",
                       return_value=[]):
                with patch(
                        "disco_aws_automation.DiscoAWS.create_scaling_schedule",
                        return_value=None):
                    with patch(
                            "boto.ec2.autoscale.AutoScaleConnection.create_or_update_tags",
                            return_value=None):
                        aws.provision(ami=self._get_image_mock(aws),
                                      hostclass="mhcunittest",
                                      owner="unittestuser",
                                      min_size="1@1 0 * * *:2@6 0 * * *",
                                      desired_size="2@1 0 * * *:3@6 0 * * *",
                                      max_size="6@1 0 * * *:9@6 0 * * *")

        _ag = aws.autoscale.get_groups()[0]
        self.assertEqual(_ag.min_size, 1)  # minimum of listed sizes
        self.assertEqual(_ag.desired_capacity, 3)  # maximum of listed sizes
        self.assertEqual(_ag.max_size, 9)  # maximum of listed sizes
Example #34
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()
    parser = get_parser()
    args = parser.parse_args()
    configure_logging(args.debug)

    environment_name = args.env or config.get("disco_aws",
                                              "default_environment")

    aws = DiscoAWS(config, environment_name=environment_name)
    if args.mode == "create":
        aws.disco_storage.create_ebs_snapshot(args.hostclass, args.size)
    elif args.mode == "list":
        for snapshot in aws.disco_storage.get_snapshots(args.hostclasses):
            print("{0:26} {1:13} {2:9} {3} {4:4}".format(
                snapshot.tags['hostclass'], snapshot.id, snapshot.status,
                snapshot.start_time, snapshot.volume_size))
    elif args.mode == "cleanup":
        aws.disco_storage.cleanup_ebs_snapshots(args.keep)
    elif args.mode == "capture":
        instances = instances_from_args(aws, args)
        if not instances:
            logging.warning("No instances found")
        for instance in instances:
            return_code, output = aws.remotecmd(
                instance, ["sudo /opt/wgen/bin/take_snapshot.sh"],
                user="******")
            if return_code:
                raise Exception(
                    "Failed to snapshot instance {0}:\n {1}\n".format(
                        instance, output))
            logging.info("Successfully snapshotted %s", instance)
    elif args.mode == "delete":
        for snapshot_id in args.snapshots:
            aws.disco_storage.delete_snapshot(snapshot_id)
    elif args.mode == "update":
        snapshot = aws.disco_storage.get_latest_snapshot(args.hostclass)
        aws.autoscale.update_snapshot(args.hostclass, snapshot.id,
                                      snapshot.volume_size)
Example #35
    def test_provision_hostclass_sched_some_none(self, mock_config, **kwargs):
        """
        Provision creates the proper autoscaling group sizes with scheduled sizes
        """
        aws = DiscoAWS(config=mock_config, environment_name=TEST_ENV_NAME, log_metrics=MagicMock())
        aws.update_elb = MagicMock(return_value=None)
        aws.discogroup.elastigroup.spotinst_client = MagicMock()

        with patch("disco_aws_automation.DiscoAWS.get_meta_network", return_value=_get_meta_network_mock()):
            with patch("boto.ec2.connection.EC2Connection.get_all_snapshots", return_value=[]):
                with patch("disco_aws_automation.DiscoAWS.create_scaling_schedule", return_value=None):
                    with patch("boto.ec2.autoscale.AutoScaleConnection.create_or_update_tags",
                               return_value=None):
                        aws.provision(ami=self._get_image_mock(aws),
                                      hostclass="mhcunittest", owner="unittestuser",
                                      min_size="",
                                      desired_size="2@1 0 * * *:3@6 0 * * *", max_size="")

        _ag = aws.discogroup.get_existing_groups()[0]
        print("({0}, {1}, {2})".format(_ag['min_size'], _ag['desired_capacity'], _ag['max_size']))
        self.assertEqual(_ag['min_size'], 0)  # minimum of listed sizes
        self.assertEqual(_ag['desired_capacity'], 3)  # maximum of listed sizes
        self.assertEqual(_ag['max_size'], 3)  # maximum of listed sizes
Example #36
def run():
    """Parses command line and dispatches the commands"""
    args = docopt(__doc__)

    configure_logging(args["--debug"])

    config = read_config()

    env = args.get("--env") or config.get("disco_aws", "default_environment")
    vpc = DiscoVPC.fetch_environment(environment_name=env)
    if not vpc:
        print("Environment does not exist: {}".format(env))
        sys.exit(1)

    if args['list']:
        for elb in sorted(DiscoELB(vpc).list()):
            print("{0:<20} {1:25}".format(elb['LoadBalancerName'],
                                          ','.join(elb['AvailabilityZones'])))
    elif args['update']:
        DiscoAWS(config, env).update_elb(args['--hostclass'])
Example #37
 def test_is_running_running(self, mock_config, **kwargs):
     '''is_running returns true for running instance'''
     self.assertTrue(DiscoAWS.is_running(self.instance))
Example #38
 def test_size_as_rec_map_with_duped_map(self):
     """_size_as_recurrence_map works with a duped map"""
     map_as_string = "2@1 0 * * *:3@6 0 * * *:3@6 0 * * *"
     map_as_dict = {"1 0 * * *": 2, "6 0 * * *": 3}
     self.assertEqual(DiscoAWS._size_as_recurrence_map(map_as_string), map_as_dict)
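The "count@cron" strings used throughout these scheduling tests split on ':' into entries and on '@' into a size and a cron expression; a repeated cron key simply overwrites the earlier value, which is why the duped map above collapses to two keys. A minimal sketch of that parse follows, written as an assumption about the format rather than DiscoAWS._size_as_recurrence_map itself.

def size_as_recurrence_map(size_spec):
    """Turn 'count@cron[:count@cron...]' into a {cron: count} dict."""
    recurrence_map = {}
    for entry in size_spec.split(":"):
        count, cron = entry.split("@", 1)
        recurrence_map[cron] = int(count)  # later duplicates overwrite earlier ones
    return recurrence_map

# Reproduces the expectation in the test above.
assert size_as_recurrence_map("2@1 0 * * *:3@6 0 * * *:3@6 0 * * *") == {
    "1 0 * * *": 2, "6 0 * * *": 3}
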
Example #39
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()

    args = docopt(__doc__)

    configure_logging(args["--debug"])

    env = args["--environment"] or config.get("disco_aws",
                                              "default_environment")

    force_deployable = None if args["--deployable"] is None else is_truthy(
        args["--deployable"])

    pipeline_definition = []
    if args["--pipeline"]:
        with open(args["--pipeline"], "r") as f:
            reader = csv.DictReader(f)
            pipeline_definition = [line for line in reader]

    aws = DiscoAWS(config, env)

    if config.has_option('test', 'env'):
        test_env = config.get('test', 'env')
        test_aws = DiscoAWS(config, test_env)
    else:
        test_aws = aws

    bake = DiscoBake(config, aws.connection)

    if args["--ami"] and args["--hostclass"]:
        image = bake.get_image(args["--ami"])
        if args["--hostclass"] != bake.ami_hostclass(image):
            logger.error('AMI %s does not belong to hostclass %s',
                         args["--ami"], args["--hostclass"])
            sys.exit(1)

    vpc = DiscoVPC.fetch_environment(environment_name=env)

    deploy = DiscoDeploy(aws,
                         test_aws,
                         bake,
                         DiscoGroup(env),
                         DiscoELB(vpc),
                         DiscoSSM(environment_name=env),
                         pipeline_definition=pipeline_definition,
                         ami=args.get("--ami"),
                         hostclass=args.get("--hostclass"),
                         allow_any_hostclass=args["--allow-any-hostclass"])

    if args["test"]:
        try:
            deploy.test(dry_run=args["--dry-run"],
                        deployment_strategy=args["--strategy"],
                        ticket_id=args["--ticket"],
                        force_deployable=force_deployable)
        except RuntimeError as err:
            logger.error(str(err))
            sys.exit(1)
    elif args["update"]:
        try:
            deploy.update(dry_run=args["--dry-run"],
                          deployment_strategy=args["--strategy"],
                          ticket_id=args["--ticket"],
                          force_deployable=force_deployable)
        except RuntimeError as err:
            logger.error(str(err))
            sys.exit(1)
    elif args["list"]:
        missing = "-" if pipeline_definition else ""
        if args["--tested"]:
            for (_hostclass,
                 ami) in deploy.get_latest_tested_amis().items():
                print("{} {:40} {}".format(
                    ami.id,
                    ami.name.split()[0],
                    deploy.get_integration_test(ami.name.split()[0])
                    or missing))
        elif args["--untested"]:
            for (_hostclass,
                 ami) in deploy.get_latest_untested_amis().items():
                print("{} {:40} {}".format(
                    ami.id,
                    ami.name.split()[0],
                    deploy.get_integration_test(ami.name.split()[0])
                    or missing))
        elif args["--failed"]:
            for (_hostclass,
                 ami) in deploy.get_latest_failed_amis().items():
                print("{} {:40} {}".format(
                    ami.id,
                    ami.name.split()[0],
                    deploy.get_integration_test(ami.name.split()[0])
                    or missing))
        elif args["--testable"]:
            for ami in deploy.get_test_amis():
                print("{} {:40} {}".format(
                    ami.id,
                    ami.name.split()[0],
                    deploy.get_integration_test(ami.name.split()[0])
                    or missing))
        elif args["--updatable"]:
            for ami in deploy.get_update_amis():
                print("{} {:40} {}".format(
                    ami.id,
                    ami.name.split()[0],
                    deploy.get_integration_test(ami.name.split()[0])
                    or missing))
        elif args["--failures"]:
            failures = deploy.get_failed_amis()
            for ami in failures:
                print("{} {:40} {}".format(
                    ami.id,
                    ami.name.split()[0],
                    deploy.get_integration_test(ami.name.split()[0])
                    or missing))
            sys.exit(1 if failures else 0)
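The --pipeline option above feeds a CSV file through csv.DictReader, so each row becomes a dict keyed by the header line. The exact column set is not visible in this snippet; the names below are borrowed from the hostclass dictionaries built by the provision command later on this page and are only an illustration:

import csv
import io

SAMPLE_PIPELINE_CSV = """\
sequence,hostclass,instance_type,min_size,desired_size,max_size
1,mhcfoo,m3.medium,1,1,1
2,mhcbar,t2.small,2,2,4
"""

reader = csv.DictReader(io.StringIO(SAMPLE_PIPELINE_CSV))
pipeline_definition = [row for row in reader]
print(pipeline_definition[0]["hostclass"])  # -> mhcfoo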
Example #52
 def aws(self):
     """Lazily creates a DiscoAWS object"""
     if not self._aws:
         self._aws = DiscoAWS(self.config, self.env)
     return self._aws
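The aws accessor above is presumably decorated with @property on some deploy or CLI wrapper class; on its own it is missing that context. A self-contained sketch of the lazy-initialization pattern it implements (the class name and constructor arguments here are illustrative, not the original ones):

from disco_aws_automation import DiscoAWS


class DeployRunner(object):
    """Hypothetical wrapper that defers DiscoAWS construction until first use."""

    def __init__(self, config, env):
        self.config = config
        self.env = env
        self._aws = None  # built lazily by the property below

    @property
    def aws(self):
        """Lazily creates a DiscoAWS object"""
        if not self._aws:
            self._aws = DiscoAWS(self.config, self.env)
        return self._aws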
Example #53
 def test_create_scaling_schedule_no_sched(self, mock_config, **kwargs):
     """test create_scaling_schedule with only desired schedule"""
     aws = DiscoAWS(config=mock_config, environment_name=TEST_ENV_NAME)
     aws.autoscale = MagicMock()
     aws.create_scaling_schedule("mhcboo", "1", "2", "5")
     aws.autoscale.assert_has_calls([call.delete_all_recurring_group_actions('mhcboo')])
Example #54
 def test_is_terminal_state_updates(self, mock_config, **kwargs):
     '''is_terminal_state calls instance update'''
     DiscoAWS.is_terminal_state(self.instance)
     self.assertEqual(self.instance.update.call_count, 1)
Example #55
 def test_smoketest_all_good(self, mock_config, **kwargs):
     '''smoketest_once returns True when the instance is tagged as smoketested'''
     aws = DiscoAWS(config=mock_config, environment_name=TEST_ENV_NAME)
     self.instance.tags.get = MagicMock(return_value="100")
     self.assertTrue(aws.smoketest_once(self.instance))
Example #56
 def test_is_terminal_state_terminated(self, mock_config, **kwargs):
     '''is_terminal_state returns true if instance has terminated or failed to start'''
     self.instance.state = "terminated"
     self.assertTrue(DiscoAWS.is_terminal_state(self.instance))
     self.instance.state = "failed"
     self.assertTrue(DiscoAWS.is_terminal_state(self.instance))
Example #57
 def test_is_terminal_state_running(self, mock_config, **kwargs):
     '''is_terminal_state returns false for running instance'''
     self.assertFalse(DiscoAWS.is_terminal_state(self.instance))
Example #58
 def test_is_running_updates(self, mock_config, **kwargs):
     '''is_running calls instance update'''
     DiscoAWS.is_running(self.instance)
     self.assertEqual(self.instance.update.call_count, 1)
Example #59
 def test_is_running_terminated(self, mock_config, **kwargs):
     '''is_running returns false if instance has terminated'''
     self.instance.state = "terminated"
     self.assertFalse(DiscoAWS.is_running(self.instance))
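Taken together, the is_running/is_terminal_state tests above pin down two behaviors: each helper refreshes the instance exactly once via update(), then classifies it by its state string. Minimal module-level stand-ins consistent with those tests (the precise set of terminal states beyond "terminated" and "failed" is an assumption):

def is_running(instance):
    """Return True once the instance reports the 'running' state."""
    instance.update()  # the tests assert update() is called exactly once
    return instance.state == "running"


def is_terminal_state(instance):
    """Return True if the instance has terminated or failed to start."""
    instance.update()
    return instance.state in ("terminated", "failed")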
Example #60
def run():
    """Parses command line and dispatches the commands"""
    config = read_config()

    parser = get_parser()
    args = parser.parse_args()
    configure_logging(args.debug)

    environment_name = args.env or config.get("disco_aws", "default_environment")

    aws = DiscoAWS(config, environment_name=environment_name)
    if args.mode == "provision":
        hostclass_dicts = [{
            "sequence": 1,
            "hostclass": args.hostclass,
            "instance_type": args.instance_type,
            "extra_space": args.extra_space,
            "extra_disk": args.extra_disk,
            "iops": args.iops,
            "smoke_test": "no" if args.no_smoke else "yes",
            "ami": args.ami,
            "min_size": args.min_size,
            "desired_size": args.desired_size,
            "max_size": args.max_size,
            "chaos": "no" if args.no_chaos else None,
            "spotinst": args.spotinst,
            "spotinst_reserve": args.spotinst_reserve
        }]
        aws.spinup(hostclass_dicts, testing=args.testing)
    elif args.mode == "listhosts":
        instances = aws.instances_from_hostclass(args.hostclass) if args.hostclass else aws.instances()
        instances_filtered = [i for i in instances if i.state != u"terminated"]
        instances_sorted = sorted(instances_filtered, key=lambda i: (i.state, i.tags.get("hostclass", "-"),
                                                                     i.tags.get("hostname", "-")))
        instance_to_private_ip = {i.id: get_preferred_private_ip(i) for i in instances_sorted}
        most = args.all or args.most

        if args.ami_age or args.uptime or most:
            bake = DiscoBake(config, aws.connection)
            ami_dict = bake.list_amis_by_instance(instances)
            now = datetime.utcnow()

        for instance in instances_sorted:
            line = u"{0} {1:<30} {2:<15}".format(
                instance.id, instance.tags.get("hostclass", "-"),
                instance.ip_address or instance_to_private_ip[instance.id])
            if args.state or most:
                line += u" {0:<10}".format(instance.state)
            if args.hostname or most:
                line += u" {0:<1}".format("-" if instance.tags.get("hostname") is None else "y")
            if args.owner or most:
                line += u" {0:<11}".format(instance.tags.get("owner", u"-"))
            if args.instance_type or most:
                line += u" {0:<10}".format(instance.instance_type)
            if args.ami or most:
                line += u" {0:<12}".format(instance.image_id)
            if args.smoke or most:
                line += u" {0:<1}".format("-" if instance.tags.get("smoketest") is None else "y")
            if args.ami_age or most:
                creation_time = bake.get_ami_creation_time(ami_dict.get(instance.id))
                line += u" {0:<4}".format(DiscoBake.time_diff_in_hours(now, creation_time))
            if args.uptime or most:
                launch_time = dateutil_parser.parse(instance.launch_time)
                now_with_tz = now.replace(tzinfo=launch_time.tzinfo)  # use a timezone-aware `now`
                line += u" {0:<3}".format(DiscoBake.time_diff_in_hours(now_with_tz, launch_time))
            if args.private_ip or args.all:
                line += u" {0:<16}".format(instance_to_private_ip[instance.id])
            if args.availability_zone or args.all:
                line += u" {0:<12}".format(instance.placement)
            if args.productline or args.all:
                productline = instance.tags.get("productline", u"unknown")
                line += u" {0:<15}".format(productline if productline != u"unknown" else u"-")
            if args.securitygroup or args.all:
                line += u" {0:15}".format(instance.groups[0].name)
            print(line)

    elif args.mode == "terminate":
        instances = instances_from_args(aws, args)
        terminated_instances = aws.terminate(instances)
        print("Terminated: {0}".format(",".join([str(inst) for inst in terminated_instances])))
    elif args.mode == "stop":
        instances = instances_from_args(aws, args)
        stopped_instances = aws.stop(instances)
        print("Stopped: {0}".format(",".join([str(inst) for inst in stopped_instances])))
    elif args.mode == "exec":
        instances = instances_from_args(aws, args)
        exit_code = 0
        for instance in instances:
            _code, _stdout = aws.remotecmd(instance, [args.command], user=args.user, nothrow=True)
            sys.stdout.write(_stdout)
            exit_code = _code if _code else exit_code
        sys.exit(exit_code)
    elif args.mode == "exec-ssm":
        ssm = DiscoSSM(environment_name)
        if args.parameters:
            parsed_parameters = parse_ssm_parameters(args.parameters)
        else:
            parsed_parameters = None
        instances = [instance.id for instance in instances_from_args(aws, args)]
        if ssm.execute(instances, args.document, parameters=parsed_parameters, comment=args.comment):
            sys.exit(0)
        else:
            sys.exit(1)
    elif args.mode == "isready":
        instances = instances_from_args(aws, args)
        if not instances:
            print("No instances found")
        ready_count = 0
        for instance in instances:
            name = "{0} {1}".format(instance.tags.get("hostname"), instance.id)
            print("Checking {0}...".format(name))
            try:
                aws.smoketest_once(instance)
                print("...{0} is ready".format(name))
                ready_count += 1
            except SmokeTestError:
                print("..{0} failed smoke test".format(name))
            except TimeoutError:
                print("...{0} is NOT ready".format(name))
        sys.exit(0 if ready_count == len(instances) else 1)
    elif args.mode == "tag":
        for instance in aws.instances(instance_ids=args.instances):
            instance.remove_tag(args.key)
            if args.value:
                instance.add_tag(args.key, args.value)
    elif args.mode == "spinup":
        hostclass_dicts = read_pipeline_file(args.pipeline_definition_file)
        aws.spinup(hostclass_dicts, stage=args.stage, no_smoke=args.no_smoke, testing=args.testing)
    elif args.mode == "spindown":
        hostclasses = [line["hostclass"] for line in read_pipeline_file(args.pipeline_definition_file)]
        aws.spindown(hostclasses)
    elif args.mode == "spindownandup":
        hostclass_dicts = read_pipeline_file(args.pipeline_definition_file)
        hostclasses = [d["hostclass"] for d in hostclass_dicts]
        aws.spindown(hostclasses)
        aws.spinup(hostclass_dicts)
    elif args.mode == "gethostclassoption":
        try:
            print(aws.hostclass_option(args.hostclass, args.option))
        except NoOptionError:
            print("Hostclass %s doesn't have option %s." % (args.hostclass, args.option))
    elif args.mode == "promoterunning":
        aws.promote_running_instances_to_prod(args.hours * 60 * 60)
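Two helpers used by the listhosts and spin-up/spin-down branches above, get_preferred_private_ip and read_pipeline_file, are defined elsewhere in the module. Minimal stand-ins consistent with how they are called here (assumptions, not the module's real definitions):

import csv


def get_preferred_private_ip(instance):
    """Return a private IP address to display for the instance."""
    return instance.private_ip_address or "-"


def read_pipeline_file(path):
    """Read a pipeline definition CSV into a list of per-hostclass dicts."""
    with open(path, "r") as csv_file:
        return list(csv.DictReader(csv_file))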