    def delete_node(self, node_ip):
        lgr.debug('getting node for ip {0}'.format(node_ip))
        node = [node for node in self.cloud_driver.list_nodes() if
                node_ip in node.public_ips][0]

        lgr.debug('destroying node {0}'.format(node))
        self.cloud_driver.destroy_node(node)

    def delete_security_groups(self):

        mgmt_security_group_name = self.get_mgmt_security_group_name()
        lgr.debug('deleting management security-group {0}'.format(
            mgmt_security_group_name))
        try:
            self.cloud_driver.ex_delete_security_group(
                mgmt_security_group_name)
        except Exception:
            lgr.warn(
                'management security-group {0} may not have been deleted'
                .format(mgmt_security_group_name))

        agents_security_group_name = self._get_agent_security_group_name()
        lgr.debug('deleting agents security-group {0}'.format(
            agents_security_group_name))
        try:
            self.cloud_driver.ex_delete_security_group(
                agents_security_group_name)
        except Exception:
            lgr.warn(
                'agent security-group {0} may not have been deleted'.format(
                    agents_security_group_name))

    def _add_rule(self, security_group_name,
                  protocol, cidr_list, start_port,
                  end_port=None):

        lgr.debug('creating security-group rule for {0} with details {1}'
                  .format(security_group_name, locals().values()))
        self.cloud_driver.ex_authorize_security_group_ingress(
            securitygroupname=security_group_name,
            startport=start_port,
            endport=end_port,
            cidrlist=cidr_list,
            protocol=protocol)

    def create_security_groups(self):

        # two security groups are created: one for Cosmo-created instances
        # (the agents) and one for the Cosmo manager, allowing created
        # instances -> manager communication
        lgr.debug('reading management security-group configuration.')
        management_sg_config = self.provider_config['networking'][
            'management_security_group']
        management_sg_name = management_sg_config['name']

        if not self._is_sg_exists(management_sg_name):
            lgr.info('creating management security group: {0}'.format(
                management_sg_name))
            self.cloud_driver.ex_create_security_group(management_sg_name)

            mgmt_ports = management_sg_config['ports']
            # for each port, add rule
            for port in mgmt_ports:
                cidr = management_sg_config.get('cidr', None)
                protocol = management_sg_config.get('protocol', None)
                self._add_rule(security_group_name=management_sg_name,
                               start_port=port,
                               end_port=None,
                               cidr_list=cidr,
                               protocol=protocol)
        else:
            lgr.info('using existing management security group {0}'.format(
                management_sg_name))

        lgr.debug('reading agent security-group configuration.')
        agent_sg_config = self.provider_config['networking'][
            'agents_security_group']
        agent_sg_name = agent_sg_config['name']

        if not self._is_sg_exists(agent_sg_name):
            lgr.info('creating agent security group {0}'.format(agent_sg_name))
            self.cloud_driver.ex_create_security_group(agent_sg_name)

            agent_ports = agent_sg_config['ports']
            # for each port, add rule
            for port in agent_ports:
                cidr = agent_sg_config['cidr']
                protocol = agent_sg_config['protocol']
                self._add_rule(security_group_name=agent_sg_name,
                               start_port=port,
                               end_port=None,
                               cidr_list=cidr,
                               protocol=protocol)
        else:
            lgr.info(
                'using existing agent security group {0}'.format(
                    agent_sg_name))
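
# an illustrative 'networking' section consumed by create_security_groups()
# above. the keys are taken from the code; the group names, ports, cidr and
# protocol values below are example values only:
#
#   networking:
#     management_security_group:
#       name: cloudify-mgmt-sg
#       ports: [22, 80, 443]
#       cidr: 0.0.0.0/0
#       protocol: TCP
#     agents_security_group:
#       name: cloudify-agents-sg
#       ports: [22]
#       cidr: 0.0.0.0/0
#       protocol: TCP
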
def _read_config(config_file_path):
    if not config_file_path:
        config_file_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            CONFIG_FILE_NAME)
    defaults_config_file_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        DEFAULTS_CONFIG_FILE_NAME)

    if not os.path.exists(config_file_path) or not os.path.exists(
            defaults_config_file_path):
        if not os.path.exists(defaults_config_file_path):
            raise ValueError('Missing the defaults configuration file; '
                             'expected to find it at {0}'.format(
                                 defaults_config_file_path))
        raise ValueError('Missing the configuration file; expected to find '
                         'it at {0}'.format(config_file_path))

    lgr.debug('reading provider config files')
    with open(config_file_path, 'r') as config_file, open(
            defaults_config_file_path, 'r') as defaults_config_file:

        lgr.debug('safe loading user config')
        user_config = yaml.safe_load(config_file.read())

        lgr.debug('safe loading default config')
        defaults_config = yaml.safe_load(defaults_config_file.read())

    lgr.debug('merging configurations')
    merged_config = _deep_merge_dictionaries(user_config, defaults_config) \
        if user_config else defaults_config
    return merged_config
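
# a minimal sketch of the merge helper referenced above, assuming the intended
# semantics: values from the user config take precedence over the packaged
# defaults, and nested dictionaries are merged recursively. the actual
# _deep_merge_dictionaries used by this module may differ; this sketch is
# only illustrative.
def _deep_merge_dictionaries(overriding_dict, base_dict):
    merged = dict(base_dict)
    for key, value in overriding_dict.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            # recurse into nested sections such as 'networking' or 'compute'
            merged[key] = _deep_merge_dictionaries(value, merged[key])
        else:
            # scalar and list values from the user config simply override
            merged[key] = value
    return merged
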
def init(target_directory, reset_config, is_verbose_output=False):
    if not reset_config and os.path.exists(
            os.path.join(target_directory, CONFIG_FILE_NAME)):
        lgr.debug('a config file already exists in {0}. '
                  'either set a different config target directory '
                  'or enable the reset_config property'.format(
                      target_directory))
        return False

    provider_dir = os.path.dirname(os.path.realpath(__file__))
    files_path = os.path.join(provider_dir, CONFIG_FILE_NAME)

    lgr.debug('Copying provider files from {0} to {1}'.format(
        files_path, target_directory))
    shutil.copy(files_path, target_directory)
    return True
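
# hedged usage sketch for the two module-level helpers above; the target
# directory below is illustrative and not part of the module:
#
#   if init(target_directory='/tmp/exoscale', reset_config=False):
#       provider_config = _read_config(
#           os.path.join('/tmp/exoscale', CONFIG_FILE_NAME))
#
# init() copies the packaged config file into the target directory unless one
# already exists, and _read_config() merges the user's file over the packaged
# defaults.
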
    def create_key_pairs(self,
                         mgmt_private_key_target_path=None,
                         mgmt_public_key_filepath=None,
                         mgmt_keypair_name=None,
                         agent_private_key_target_path=None,
                         agent_public_key_filepath=None,
                         agent_keypair_name=None):

        lgr.debug('reading management keypair configuration')
        mgmt_kp_config = self.provider_config['compute']['management_server'][
            'management_keypair']
        self._create_keypair(mgmt_kp_config, mgmt_private_key_target_path,
                             mgmt_public_key_filepath, mgmt_keypair_name)

        lgr.debug('reading agent keypair configuration')
        agent_kp_config = self.provider_config['compute']['agent_servers'][
            'agents_keypair']
        self._create_keypair(agent_kp_config, agent_private_key_target_path,
                             agent_public_key_filepath, agent_keypair_name)
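
    # an illustrative 'compute' keypair configuration consumed by
    # create_key_pairs() above and _create_keypair() below. the keys are
    # taken from the code; the names and paths are example values only:
    #
    #   compute:
    #     management_server:
    #       management_keypair:
    #         name: cloudify-mgmt-kp
    #         auto_generated:
    #           private_key_target_path: ~/.ssh/cloudify-mgmt-kp.pem
    #     agent_servers:
    #       agents_keypair:
    #         name: cloudify-agents-kp
    #         provided:
    #           public_key_filepath: ~/.ssh/cloudify-agents-kp.pub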

    def teardown(self, provider_context, ignore_validation=False):
        """
        tears down the management server and its accompanying provisioned
        resources

        :param dict provider_context: context information about the previously
         provisioned resources
        :param bool ignore_validation: whether the teardown process should
         ignore conflicts during teardown
        :rtype: None
        """
        management_ip = provider_context['ip']
        lgr.info('tearing-down management vm {0}.'.format(management_ip))

        # lgr.debug('reading configuration file {0}'.format(config_path))
        # provider_config = _read_config(config_path)

        # init keypair and security-group resource creators.
        cloud_driver = ExoscaleConnector(self.provider_config).create()
        keypair_creator = ExoscaleKeypairCreator(cloud_driver,
                                                 self.provider_config)
        security_group_creator = ExoscaleSecurityGroupCreator(
            cloud_driver, self.provider_config)
        # init compute node creator
        compute_creator = ExoscaleComputeCreator(cloud_driver,
                                                 self.provider_config,
                                                 keypair_name=None,
                                                 security_group_name=None,
                                                 node_name=None)

        resource_terminator = ExoscaleResourceTerminator(
            security_group_creator,
            keypair_creator,
            compute_creator,
            management_ip)

        lgr.debug('terminating management vm and all of its resources.')
        resource_terminator.terminate_resources()

    def _create_keypair(self,
                        keypair_config,
                        private_key_target_path=None,
                        public_key_filepath=None,
                        keypair_name=None):

        if not keypair_name:
            keypair_name = keypair_config['name']
        if not private_key_target_path:
            private_key_target_path = keypair_config.get(
                'auto_generated', {}).get('private_key_target_path', None)
        if not public_key_filepath:
            public_key_filepath = keypair_config.get('provided', {}).get(
                'public_key_filepath', None)

        if self._get_keypair(keypair_name):
            lgr.info('using existing keypair {0}'.format(keypair_name))
            return

        if not private_key_target_path and not public_key_filepath:
            raise RuntimeError(
                '{0} keypair not found. '
                'you must provide either a private key target path, '
                'a public key file path or an existing keypair name '
                'in the configuration file'.format(keypair_name))

        if public_key_filepath:
            if not os.path.exists(public_key_filepath):
                raise RuntimeError(
                    'public key {0} was not found on your local '
                    'file system.'.format(public_key_filepath))

            lgr.debug('importing public key with name {0} from {1}'.format(
                keypair_name, public_key_filepath))
            self.cloud_driver.import_key_pair_from_file(
                keypair_name, public_key_filepath)
        else:
            lgr.info('creating a keypair named {0}'.format(keypair_name))
            result = self.cloud_driver.create_key_pair(keypair_name)
            pk_target_path = os.path.expanduser(private_key_target_path)

            try:
                lgr.debug('creating dir {0}'.format(
                    os.path.dirname(pk_target_path)))
                os.makedirs(os.path.dirname(pk_target_path))
            except OSError as exc:
                # ignore the error only if the directory already exists
                if exc.errno != errno.EEXIST or not \
                        os.path.isdir(os.path.dirname(pk_target_path)):
                    raise

            lgr.debug('writing private key to file {0}'.format(
                pk_target_path))
            with open(pk_target_path, 'w') as f:
                f.write(result.private_key)
            # restrict the private key permissions to the owner only
            os.chmod(pk_target_path, 0o600)

    def create_node(self):

        lgr.debug('reading server configuration.')
        server_config = self.provider_config.get('compute', {}) \
            .get('management_server', {}).get('instance', None)

        lgr.debug('reading management vm image and size IDs from config')
        image_id = server_config.get('image')
        size_id = server_config.get('size')

        lgr.debug('getting node image for ID {0}'.format(image_id))
        image = [
            image for image in self.cloud_driver.list_images()
            if image_id == image.id
        ][0]
        lgr.debug('getting node size for ID {0}'.format(size_id))
        size = [
            size for size in self.cloud_driver.list_sizes()
            if size.name == size_id
        ][0]

        if self.node_name is None:
            self.node_name = server_config.get('name', None)
        if self.keypair_name is None:
            self.keypair_name = server_config['management_keypair']['name']
        if self.security_group_names is None:
            network_config = self.provider_config.get('networking', {}) \
                .get('management_security_group', {})
            self.security_group_names = [
                network_config['name'],
            ]

        lgr.info('starting a new virtual instance named {0}'.format(
            self.node_name))
        result = self.cloud_driver.create_node(
            name=self.node_name,
            ex_keyname=self.keypair_name,
            ex_security_groups=self.security_group_names,
            image=image,
            size=size)

        return result.public_ips[0]

    def provision(self):
        """
        provisions resources for the management server

        returns a tuple with the machine's public and private IPs,
        the ssh key and user configured in the config yaml and
        the provider's context (a dict containing the provisioned
        resources to be used during teardown)

        the tuple's order corresponds with the above order.

        :rtype: 'tuple' with machine context.
        """
        lgr.info('bootstrapping to Exoscale provider.')

        lgr.debug('reading configuration file')
        # provider_config = _read_config(None)

        # init keypair and security-group resource creators.
        cloud_driver = ExoscaleConnector(self.provider_config).create()
        keypair_creator = ExoscaleKeypairCreator(cloud_driver,
                                                 self.provider_config)
        security_group_creator = ExoscaleSecurityGroupCreator(
            cloud_driver, self.provider_config)

        # create required node topology
        lgr.debug('creating the required resources for management vm')
        security_group_creator.create_security_groups()
        keypair_creator.create_key_pairs()

        keypair_name = keypair_creator.get_management_keypair_name()
        sg_name = security_group_creator.get_mgmt_security_group_name()

        lgr.debug('reading server configuration.')
        mgmt_server_config = self.provider_config.get('compute', {}) \
            .get('management_server', {})

        # init compute node creator
        compute_creator = ExoscaleComputeCreator(cloud_driver,
                                                 self.provider_config,
                                                 keypair_name, sg_name)

        # spinning-up a new instance using the above topology.
        # Exoscale provider supports only public ip allocation.
        # see cloudstack 'basic zone'
        public_ip = compute_creator.create_node()

        provider_context = {"ip": str(public_ip)}

        print('public ip: {0} private key path: {1} user name: {2}'.format(
            public_ip,
            self._get_private_key_path_from_keypair_config(
                mgmt_server_config['management_keypair']),
            mgmt_server_config.get('user_on_management')))

        self.copy_files_to_manager(
            public_ip, self.provider_config,
            self._get_private_key_path_from_keypair_config(
                mgmt_server_config['management_keypair']),
            mgmt_server_config.get('user_on_management'))

        return public_ip, \
            public_ip, \
            self._get_private_key_path_from_keypair_config(
                mgmt_server_config['management_keypair']), \
            mgmt_server_config.get('user_on_management'), \
            provider_context
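
    # hedged example of consuming the tuple returned by provision() above
    # (the variable names are illustrative only):
    #
    #   public_ip, private_ip, key_path, ssh_user, context = \
    #       provider_manager.provision()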

    def create(self):
        lgr.debug('creating exoscale cloudstack connector')
        api_key = self.config['authentication']['api_key']
        api_secret_key = self.config['authentication']['api_secret_key']
        cls = get_driver(Provider.EXOSCALE)
        return cls(api_key, api_secret_key)