Exemple #1
0
 def _settings():
     '''
     Build the mumble (murmur) service settings mapping.

     Reads previously generated secrets from the local 'mumble' registry
     (generating them on first run), merges defaults with pillar data via
     mc_utils.defaults, persists the registry back and returns the merged
     settings.
     '''
     grains = __grains__
     pillar = __pillar__
     # Local registry keeps generated secrets stable across runs.
     local_conf = __salt__['mc_macros.get_local_registry'](
         'mumble', registry_format='pack')
     password = local_conf.setdefault('password', secure_password(8))
     supassword = local_conf.setdefault('supassword', secure_password(32))
     htpassword = local_conf.setdefault('htpassword', secure_password(32))
     locations = __salt__['mc_locations.settings']()
     fqdn = grains['id']
     mumbleData = __salt__['mc_utils.defaults'](
         'makina-states.services.sound.mumble', {
             'default': {
                 'use_caps': 0,
                 'start': 1,
                 'nofile': 16384,
             },
             'murmur': {
                 # NOTE(review): 'supassword' is filled from htpassword while
                 # the supassword local above goes unused -- confirm intended.
                 'supassword': htpassword,
                 'uname': 'mumble-server',
                 'password': password,
                 'sendversion': 'True',
                 'allowhtml': 'True',
                 'port': 64738,
                 'ice': 'tcp -h 127.0.0.1 -p 6502',
                 'textmessagelength': 0,
                 'imagemessagelength': 0,
             },
         })
     # Persist (possibly newly generated) secrets back to the registry.
     __salt__['mc_macros.update_local_registry']('mumble',
                                                 local_conf,
                                                 registry_format='pack')
     return mumbleData
Exemple #2
0
 def _settings():
     '''
     Assemble the murmur/mumble configuration mapping.

     Generated secrets are cached in the local 'mumble' registry so they
     stay stable across runs; defaults are merged with pillar data through
     mc_utils.defaults and the registry is saved back before returning.
     '''
     _s = __salt__
     _g = __grains__
     _p = __pillar__
     registry = _s['mc_macros.get_local_registry'](
         'mumble', registry_format='pack')
     password = registry.setdefault('password', secure_password(8))
     supassword = registry.setdefault('supassword', secure_password(32))
     htpassword = registry.setdefault('htpassword', secure_password(32))
     locations = _s['mc_locations.settings']()
     fqdn = _g['id']
     defaults = {
         'default': {
             'use_caps': 0,
             'start': 1,
             'nofile': 16384,
         },
         'murmur': {
             'supassword': htpassword,
             'uname': 'mumble-server',
             'password': password,
             'sendversion': 'True',
             'allowhtml': 'True',
             'port': 64738,
             'ice': 'tcp -h 127.0.0.1 -p 6502',
             'textmessagelength': 0,
             'imagemessagelength': 0,
         },
     }
     settings = _s['mc_utils.defaults'](
         'makina-states.services.sound.mumble', defaults)
     _s['mc_macros.update_local_registry'](
         'mumble', registry, registry_format='pack')
     return settings
Exemple #3
0
def set_password(username, password, encrypted=False, role=None, crypt_salt=None, algorithm='sha256'):
    '''
    Set users password on switch

    username
        Username to configure

    password
        Password to configure for username

    encrypted
        When True, ``password`` is already a hash and is used verbatim.
        Default: False

    role
        Optional role to assign to the username
        Default: None

    crypt_salt
        Salt used when hashing the password
        Default: None (an alphanumeric salt is generated)

    algorithm
        Hashing algorithm
        Default: sha256

    .. code-block:: bash

        salt '*' nxos.cmd set_password admin TestPass
        salt '*' nxos.cmd set_password admin \\
            password='******' \\
            encrypted=True
    '''
    # The original bound get_user()'s result here only to unconditionally
    # overwrite it below; keep the call (it may have device-side effects --
    # TODO confirm) but drop the dead store.
    get_user(username)
    if encrypted is False:
        if crypt_salt is None:
            # NXOS does not like non alphanumeric characters.  Using the random module from pycrypto
            # can lead to having non alphanumeric characters in the salt for the hashed password.
            crypt_salt = secure_password(8, use_random=False)
        hashed_pass = gen_hash(crypt_salt=crypt_salt, password=password, algorithm=algorithm)
    else:
        hashed_pass = password
    password_line = 'username {0} password 5 {1}'.format(username, hashed_pass)
    if role is not None:
        password_line += ' role {0}'.format(role)
    try:
        sendline('config terminal')
        ret = sendline(password_line)
        sendline('end')
        sendline('copy running-config startup-config')
        return '\n'.join([password_line, ret])
    except TerminalException as e:
        log.error(e)
        return 'Failed to set password'
Exemple #4
0
def set_password(username, password, encrypted=False, role=None, crypt_salt=None, algorithm='sha256'):
    '''
    Set a user's password on the switch.

    .. code-block:: bash

        salt '*' nxos.cmd set_password admin TestPass
        salt '*' nxos.cmd set_password admin \\
            password='******' \\
            encrypted=True
    '''
    password_line = get_user(username)
    # Pre-hashed passwords are used verbatim; otherwise derive a hash.
    hashed_pass = password
    if encrypted is False:
        if crypt_salt is None:
            # Keep the salt alphanumeric: NXOS dislikes other characters and
            # pycrypto's random module can produce them.
            crypt_salt = secure_password(8, use_random=False)
        hashed_pass = gen_hash(crypt_salt=crypt_salt, password=password, algorithm=algorithm)
    password_line = 'username {0} password 5 {1}'.format(username, hashed_pass)
    if role is not None:
        password_line += ' role {0}'.format(role)
    try:
        sendline('config terminal')
        ret = sendline(password_line)
        sendline('end')
        sendline('copy running-config startup-config')
    except TerminalException as e:
        log.error(e)
        return 'Failed to set password'
    return '\n'.join([password_line, ret])
Exemple #5
0
def set_password(username,
                 password,
                 encrypted=False,
                 role=None,
                 crypt_salt=None,
                 algorithm='sha256',
                 **kwargs):
    '''
    Set users password on switch.

    username
        Username to configure

    password
        Password to configure for username

    encrypted
        Whether or not to encrypt the password
        Default: False

    role
        Configure role for the username
        Default: None

    crypt_salt
        Configure crypt_salt setting
        Default: None

    algorithm
        Encryption algorithm
        Default: sha256

    no_save_config
        If True, don't save configuration commands to startup configuration.
        If False, save configuration to startup configuration.
        Default: False

    .. code-block:: bash

        salt '*' nxos.cmd set_password admin TestPass
        salt '*' nxos.cmd set_password admin \\
            password='******' \\
            encrypted=True
    '''
    # kwargs (e.g. no_save_config) are forwarded to get_user()/config().
    password_line = get_user(username, **kwargs)
    if encrypted is False:
        if crypt_salt is None:
            # NXOS does not like non alphanumeric characters.  Using the random module from pycrypto
            # can lead to having non alphanumeric characters in the salt for the hashed password.
            crypt_salt = secure_password(8, use_random=False)
        hashed_pass = gen_hash(crypt_salt=crypt_salt,
                               password=password,
                               algorithm=algorithm)
    else:
        hashed_pass = password
    # NOTE(review): password_line from get_user() is overwritten here --
    # the lookup result goes unused.
    password_line = 'username {0} password 5 {1}'.format(username, hashed_pass)
    if role is not None:
        password_line += ' role {0}'.format(role)
    return config(password_line, **kwargs)
def find_password_for_vm(vm, default=None, pwlen=32, target=None):
    '''
    Return the password for ``vm``, generating it on first use.

    The password is looked up in the vm configuration, then falls back to
    ``default``, then to a freshly generated ``pwlen``-character secret.
    '''
    if target is None:
        target = target_for_vm(vm)
    pw = get_conf_for_vm(vm, 'password', target=target) or default
    if not pw:
        pw = secure_password(pwlen)
        if not pw:
            raise Exception('Error while setting password '
                            'grain for {0}/{1}'.format(target, vm))
    return pw
def find_password_for_vm(target,
                         virt_type,
                         vm,
                         default=None,
                         pwlen=32):
    '''
    Return the password for ``vm``, generating it on first use.

    Falls back from the vm configuration to ``default``, then to a freshly
    generated ``pwlen``-character secret.
    '''
    pw = get_conf_for_vm(target, virt_type, vm, 'password') or default
    if not pw:
        pw = secure_password(pwlen)
        if not pw:
            raise Exception('Error while setting password '
                            'grain for {0}/{1}'.format(target, vm))
    return pw
Exemple #8
0
 def _settings():
     '''
     Build the dhcpd6 service settings mapping.

     Loads the local 'dhcpd' registry (generating the cn_pass secret on
     first run), picks the server package based on the OS and merges the
     defaults below with pillar data via mc_utils.defaults.
     '''
     _g = __grains__
     _p = __pillar__
     locations = __salt__['mc_locations.settings']()
     local_conf = __salt__['mc_macros.get_local_registry'](
         'dhcpd', registry_format='pack')
     cn_pass = local_conf.setdefault('cn_pass', secure_password(32))
     pkgs = ['dhcp3-server']
     if _g.get('os') in ['Ubuntu']:
         # NOTE(review): 'osrelease' is typically a string grain (e.g.
         # '15.10'); comparing it to the float 15.10 raises TypeError on
         # Python 3 -- confirm the intended comparison.
         if _g['osrelease'] >= 15.10:
             pkgs = ['isc-dhcp-server']
     dhcpdData = __salt__['mc_utils.defaults'](
         'makina-states.services.dns.dhcpd6', {
             'dhcpd_directory': "/etc/dhcpd",
             'templates': {
                 '/etc/default/isc-dhcp-server6': {},
                 '/etc/systemd/system/isc-dhcp-server6.service': {},
                 '/etc/dhcp/dhcpd6.conf': {},
             },
             'extra_dirs': [],
             'defaults': {
                 'INTERFACES': '',
                 'OPTIONS': '',
             },
             'subnets': {},
             'hosts': {},
             'conf': {
                 'ddns_update_style': 'none',
                 'domain_name': 'example.org',
                 'default_lease_time': '864000',
                 'max_lease_time': '864000',
                 'log_facility': 'local7',
                 'domain_name_servers':
                 ('ns1.example.org, ns2.example.org'),
             },
             'pkgs': pkgs,
             'service_name': 'isc-dhcp-server6'
         })
     return dhcpdData
Exemple #9
0
    def _settings():
        '''
        Build the burp backup service settings.

        Loads/creates persisted secrets from the local 'burp' registry,
        merges defaults with pillar data, derives per-client ssh/burp
        settings (including sprayed cron periodicities) and writes the
        registry back before returning the data mapping.
        '''
        salt = __salt__
        local_conf = salt['mc_macros.get_local_registry'](
            'burp', registry_format='pack')
        ca_pass = local_conf.get('ca_pass', secure_password(32))
        server_pass = local_conf.get('server_pass', secure_password(32))
        timers = local_conf.setdefault('timers', OrderedDict())
        local_conf['ca_pass'] = ca_pass
        local_conf['server_pass'] = server_pass
        grains = __grains__
        pillar = __pillar__
        locs = __salt__['mc_locations.settings']()
        data = {}
        data = __salt__['mc_utils.defaults'](
            'makina-states.services.backup.burp', {
                'ver': '1.3.48',
                'user': '******',
                'group': 'root',
                'ssl_cert_ca': '/etc/burp/ssl_cert_ca.pem',
                'server_conf': {
                    'fqdn': grains['id'],
                    'port': '4971',
                    'status_port': '7972',
                    'client_port': '4971',
                    'client_status_port': '7972',
                    'directory': '/data/burp',
                    'clientconfdir': '/etc/burp/clientconfdir',
                    'pidfile': '/var/run/burp.server.pid',
                    'hardlinked_archive': '1',
                    'working_dir_recovery_method': 'delete',
                    'max_children': '5',
                    'max_status_children': '5',
                    'umask': '0022',
                    'syslog': '1',
                    'stdout': '0',
                    'client_can_delete': '0',
                    'client_can_force_backup': '0',
                    'client_can_list': '1',
                    'client_can_restore': '1',
                    'client_can_verify': '1',
                    'version_warn': '1',
                    'keep': [7, 4, 6],
                    # s (sec), m (min), h (hours), d (day), w (week), n (month)
                    # Allow backups to start in the evenings and nights
                    # during weekdays
                    # Allow more hours at the weekend.
                    'timer_script': '/etc/burp/timer_script',
                    'timer_arg': ['20h',
                                  ('Mon,Tue,Wed,Thu,Fri,'
                                   '00,01,02,03,04,05,19,20,21,22,23'),
                                  ('Sat,Sun,'
                                   '00,01,02,03,04,05,06,07,08,'
                                   '17,18,19,20,21,22,23')],
                    'ca_conf': '/etc/burp/CA.cnf',
                    'ca_name': 'burpCA',
                    'ca_server_name': grains['id'],
                    'ca_burp_ca': '/usr/sbin/burp_ca',
                    'ssl_cert': '/etc/burp/ssl_cert-server.pem',
                    'ssl_key': '/etc/burp/ssl_cert-server.key',
                    'ssl_key_password': server_pass,
                    'ssl_dhfile': '/etc/burp/dhfile.pem',
                    'notify_failure_script': '/etc/burp/notify_script',
                    'notify_failure_arg': [
                        'sendmail -t',
                        'To: root',
                        'From: "burp {0}" <*****@*****.**>'.format(grains['id']),
                        'Subject: %b failed: %c %w'],
                    'server_script_pre': None,
                    'server_script_pre_arg': None,
                    'server_script_pre_notify': '0',
                    'server_script_post': None,
                    'server_script_post_arg': None,
                    'server_script_post_run_on_fail': '0',
                    'server_script_post_notify': '0',
                    'restore_client': None,
                },
                'client_common': {
                    'dedup_group': 'linux',
                    'mode': 'client',
                    'port': '4971',
                    'pidfile': '/var/run/burp.client.pid',
                    'syslog': '0',
                    'stdout': '1',
                    'progress_counter': '1',
                    'ratelimit': None,
                    'network_timeout': '7200',
                    'autoupgrade_dir': None,
                    'autoupgrade_os': None,
                    'server_can_restore': '1',
                    'cross_filesystem': [],
                    'cross_all_filesystems': '0',
                    'encryption_password': None,
                    'ca_burp_ca': None,
                    'ca_csr_dir': None,
                    'ssl_cert': '/etc/burp/ssl_cert-client.pem',
                    'ssl_key': '/etc/burp/ssl_cert-client.key',
                    'ssl_ciphers': None,
                    'backup_script_pre': None,
                    'backup_script_post': None,
                    'restore_script_pre': None,
                    'restore_script_post': None,
                    'include': ['/'],
                    'exclude': None,
                    'exclude_ext': ['pyc',
                                    'pyo'],
                    'exclude_regex': None,
                    'exclude_fs': ['sysfs', 'tmpfs'],
                    'min_file_size': None,
                    'max_file_size': None,
                    'nobackup': '.nobackup',
                    'read_fifo': None,
                    'read_all_fifos': None,
                    'read_blockdev': None,
                    'read_all_blockdevs': '0',
                    'exclude_comp': None,
                    'cron_periodicity': (
                        "* * * * *"
                    ),
                    'cron_cmd': (
                        " {user} [ -x /usr/sbin/burp ] && "
                        " /usr/sbin/burp -a t "
                        "   >>/var/log/burp-client 2>&1"
                    )
                },
                'clients': {
                    # mapping of clients confs (defined in pillar),
                }
            }
        )
        # Mirror shared keys onto both server and client sections.
        for k in ['user',
                  'group',
                  'ssl_cert_ca']:
            data['server_conf'][k] = data['client_common'][k] = data[k]
        # NOTE(review): str.format returns a new string and this result is
        # discarded, so cron_cmd keeps its '{user}' placeholder here --
        # confirm whether an assignment was intended.
        data['client_common']['cron_cmd'].format(user=data['user'])
        data['client_common'].setdefault('server',
                                         data['server_conf']['fqdn'])
        data['client_common'].setdefault(
            'ssl_peer_cn', data['server_conf']['fqdn'])
        data['clients'].setdefault(data['server_conf']['fqdn'], {})
        # Base minutes for the cron spray below.
        hour = [0, 20, 40]
        for cname in [a for a in data['clients']]:
            cl = data['clients'][cname]
            cl['cname'] = cname
            ssh_port = cl.get('ssh_port', '')
            if ssh_port:
                ssh_port = '-p {0}'.format(ssh_port)
            cl['rsh_cmd'] = 'ssh {1} {2} {0} {3}'.format(
                '-oStrictHostKeyChecking=no',
                # Set hosts key database path to /dev/null, ie, non-existing
                '-oUserKnownHostsFile=/dev/null',
                # Don't re-use the SSH connection. Less failures.
                '-oControlPath=none',
                ssh_port,
            )
            cl['rsh_dst'] = '{1}@{0}'.format(cname,
                                             cl.get('ssh_user', 'root'))
            cl['ssh_cmd'] = cl['rsh_cmd'] + '{1}@{0}'.format(
                cname, cl.get('ssh_user', 'root'))
            if 'ssh_gateway' in cl:
                ssh_gateway_key = ''
                if 'ssh_gateway_key' in cl:
                    ssh_gateway_key = '-i {0}'.format(cl['ssh_gateway_key'])
                ssh_gateway = cl['ssh_gateway']
                ssh_gateway_port = ''
                if ':' in ssh_gateway:
                    ssh_gateway, ssh_gateway_port = ssh_gateway.split(':')
                if ssh_gateway_port:
                    ssh_gateway_port = '-p {0}'.format(ssh_gateway_port)
                # Setup ProxyCommand
                proxy_cmd = (
                    ' -oProxyCommand="'
                    'ssh {0} {1} {2} {3} {4}@{5} {6} '
                    'nc -q0 %h %p"'
                ).format(
                    '-oStrictHostKeyChecking=no',
                    # Set hosts key database path to /dev/null
                    '-oUserKnownHostsFile=/dev/null',
                    # Don't re-use the SSH connection. Less failures.
                    '-oControlPath=none',
                    ssh_gateway_key,
                    cl.get('ssh_gateway_user', 'root'),
                    ssh_gateway,
                    ssh_gateway_port
                )
                cl['ssh_cmd'] += proxy_cmd
                cl['rsh_cmd'] += proxy_cmd

            # backup host is only a client to query backups
            # and restore
            # we do not backup the backups locally
            cl.setdefault('activated', True)
            cl.setdefault('restore_client', '')
            restore_clients = [a for a in cl['restore_client'].split(',')
                               if a.strip()]
            if not data['server_conf']['fqdn'] in restore_clients:
                restore_clients.append(data['server_conf']['fqdn'])
            cl['restore_client'] = ','.join(restore_clients)
            if cl['cname'] == data['server_conf']['fqdn']:
                cl['activated'] = False
                cl['port'] = data['server_conf']['client_port']
                cl['status_port'] = data['server_conf']['client_status_port']
            if not cl['activated']:
                cl['include'] = []
                cl['cross_all_filesystems'] = []
                cl['exclude_regex'] = ['.*']
            # Per-client secrets are cached in the registry so reruns keep
            # the same passwords.
            cpassk = ('clients_passwords.{0}'.format(cname))
            cpass = local_conf.get(cpassk, secure_password(32))
            local_conf[cpassk] = cpass
            cl['ssl_key_password'] = cpass
            cpassk = ('clients_clipasswords.{0}'.format(cname))
            cpass = local_conf.get(cpassk, secure_password(32))
            local_conf[cpassk] = cpass
            cl['password'] = cpass
            for k, val in data['client_common'].items():
                # spray around the periodicity to spray the backup load
                # all over the hour.
                if k == 'cron_periodicity':
                    val = timers.get(cname, None)
                    # val = None
                    if not val:
                        per = hour[:]
                        for ix, item in enumerate(per[:]):
                            item = item + (
                                random.randint(0, 3) +
                                random.randint(0, 3) +
                                random.randint(0, 3) +
                                random.randint(0, 3) +
                                random.randint(0, 3) +
                                random.randint(0, 3))  # max 18 < 20m in
                            if item >= 60:
                                item = item - 60
                            per[ix] = item
                        val = '{0} * * * *'.format(','.join(
                            ["{0}".format(t) for t in per]))
                    timers[cname] = val
                cl.setdefault(k, val)
        # Drop legacy registry keys before persisting.
        to_delete = [a for a in local_conf
                     if a.count('makina-states.local.burp.') >= 1]
        for a in to_delete:
            local_conf.pop(a, None)
        salt['mc_macros.update_registry_params'](
            'burp', local_conf, registry_format='pack')
        return data
Exemple #10
0
 def _settings():
     _s, _g = __salt__, __grains__
     debmode = None
     mc_pkgs = _s['mc_pkgs.settings']()
     ppa = None
     source = False
     pkgs = ['burp']
     fromrepo = None
     if debmode == 'debsource':
         pkgs = [
             'librsync-dev', 'zlib1g-dev', 'libssl-dev', 'uthash-dev',
             'rsync', 'build-essential', 'libncurses5-dev', 'libacl1-dev'
         ]
     if _g['os'] in ['Ubuntu'] and _g['osrelease'] < '14.04':
         ppa = ('deb'
                ' http://ppa.launchpad.net/bas-dikkenberg/'
                'burp-stable/ubuntu'
                ' {udist} main').format(**mc_pkgs)
     if _g['os'] in ['Debian']:
         fromrepo = 'sid'
         if _g['osrelease'][0] < '6':
             source = True
     local_conf = _s['mc_macros.get_local_registry']('burp',
                                                     registry_format='pack')
     ca_pass = local_conf.get('ca_pass', secure_password(32))
     server_pass = local_conf.get('server_pass', secure_password(32))
     timers = local_conf.setdefault('timers_v2', OrderedDict())
     local_conf['ca_pass'] = ca_pass
     local_conf['server_pass'] = server_pass
     data = _s['mc_utils.defaults'](
         'makina-states.services.backup.burp',
         {
             'ppa': ppa,
             'source': source,
             'fromrepo': fromrepo,
             'pkgs': pkgs,
             'admins': 'root',
             'cron_activated': True,
             'cron_periodicity': '40 0,6,12,18 * * *',
             'ver': '1.3.48',
             'user': '******',
             'group': 'root',
             'ssl_cert_ca': '/etc/burp/ssl_cert_ca.pem',
             'server_conf': {
                 'fqdn':
                 _g['id'],
                 'port':
                 '4971',
                 'status_port':
                 '7972',
                 'client_port':
                 '4971',
                 'client_status_port':
                 '7972',
                 'directory':
                 '/data/burp',
                 'clientconfdir':
                 '/etc/burp/clientconfdir',
                 'pidfile':
                 '/var/run/burp.server.pid',
                 'hardlinked_archive':
                 '1',
                 'working_dir_recovery_method':
                 'delete',
                 'max_children':
                 '5',
                 'max_status_children':
                 '5',
                 'umask':
                 '0022',
                 'syslog':
                 '1',
                 'stdout':
                 '0',
                 'client_can_delete':
                 '0',
                 'client_can_force_backup':
                 '0',
                 'client_can_list':
                 '1',
                 'client_can_restore':
                 '1',
                 'client_can_verify':
                 '1',
                 'version_warn':
                 '1',
                 'keep': [7, 4, 6],
                 # s (sec), m (min), h (hours), d (day), w (week), n (month)
                 # Allow backups to start in the evenings and nights
                 # during weekdays
                 # Allow more hours at the weekend.
                 'timer_script':
                 '/etc/burp/timer_script',
                 'timer_arg': [
                     '28h',
                     ('Mon,Tue,Wed,Thu,Fri,'
                      '00,01,02,03,04,05,06,07,08,09,10,11,12,'
                      '13,14,15,16,17,18,19,20,21,22,23'),
                     ('Sat,Sun,'
                      '00,01,02,03,04,05,06,07,08,09,10,11,12,'
                      '13,14,15,16,17,18,19,20,21,22,23')
                 ],
                 'ca_conf':
                 '/etc/burp/CA.cnf',
                 'ca_name':
                 'burpCA',
                 'ca_server_name':
                 _g['id'],
                 'ca_burp_ca':
                 '/usr/sbin/burp_ca',
                 'ssl_cert':
                 '/etc/burp/ssl_cert-server.pem',
                 'ssl_key':
                 '/etc/burp/ssl_cert-server.key',
                 'ssl_key_password':
                 server_pass,
                 'ssl_dhfile':
                 '/etc/burp/dhfile.pem',
                 'notify_failure_script':
                 '/etc/burp/notify_script',
                 'notify_failure_arg': [
                     'sendmail -t', 'To: root',
                     'From: "burp {0}" <*****@*****.**>'.format(
                         _g['id']), 'Subject: %b failed: %c %w'
                 ],
                 'server_script_pre':
                 None,
                 'server_script_pre_arg':
                 None,
                 'server_script_pre_notify':
                 '0',
                 'server_script_post':
                 None,
                 'server_script_post_arg':
                 None,
                 'server_script_post_run_on_fail':
                 '0',
                 'server_script_post_notify':
                 '0',
                 'restore_client':
                 None,
                 'restore_port':
                 '4973',
                 'restore_status_port':
                 '7974',
                 'restore_client_port':
                 '4973',
                 'restore_client_status_port':
                 '7974',
                 'restore_pidfile':
                 '/var/run/burp.restore.pid',
                 'restore_lockfile':
                 '/var/run/burp-server-restore.lock',
                 'lockfile':
                 '/var/run/burp-server.lock',
             },
             'client_common': {
                 'dedup_group': 'linux',
                 'mode': 'client',
                 'port': '4971',
                 'restore_port': '4973',
                 'restore_status_port': '4974',
                 'syslog': '0',
                 'pidfile': '/var/run/burp.client.pid',
                 'restore_pidfile': '/var/run/burp.clientrestore.pid',
                 'stdout': '1',
                 'progress_counter': '1',
                 'ratelimit': None,
                 'network_timeout': '7200',
                 'autoupgrade_dir': None,
                 'autoupgrade_os': None,
                 'server_can_restore': '1',
                 'cross_filesystem': [],
                 'cross_all_filesystems': '0',
                 'encryption_password': None,
                 'ca_burp_ca': None,
                 'ca_csr_dir': None,
                 'ssl_cert': '/etc/burp/ssl_cert-client.pem',
                 'ssl_key': '/etc/burp/ssl_cert-client.key',
                 'ssl_ciphers': None,
                 'backup_script_pre': None,
                 'backup_script_post': None,
                 'restore_script_pre': None,
                 'restore_script_post': None,
                 'include': ['/'],
                 'exclude': None,
                 'restore_lockfile': '/var/run/burp-client.restore.lock',
                 'lockfile': '/var/run/burp-client.lock',
                 'exclude_ext': ['pyc', 'pyo'],
                 'exclude_regex': None,
                 'exclude_fs': ['sysfs', 'tmpfs'],
                 'min_file_size': None,
                 'max_file_size': None,
                 'nobackup': '.nobackup',
                 'read_fifo': None,
                 'read_all_fifos': None,
                 'read_blockdev': None,
                 'read_all_blockdevs': '0',
                 'exclude_comp': None,
                 'cron_periodicity': ("* * * * *"),
                 'cron_cmd': (" {user} /etc/burp/cron.sh 1>/dev/null 2>&1")
             },
             'whitelist': [
                 # list of sources ip to allow connection
             ],
             'clients': {
                 # mapping of clients confs (defined in pillar),
             }
         })
     for k in ['user', 'group', 'ssl_cert_ca']:
         data['server_conf'][k] = data['client_common'][k] = data[k]
     data['client_common']['cron_cmd'].format(user=data['user'])
     data['client_common'].setdefault('server', data['server_conf']['fqdn'])
     data['client_common'].setdefault('ssl_peer_cn',
                                      data['server_conf']['fqdn'])
     data['clients'].setdefault(data['server_conf']['fqdn'], {})
     tries_per_hour = 1
     hour = [i * (60 / tries_per_hour) for i in range(tries_per_hour)]
     removes = []
     for cname in [a for a in data['clients']]:
         cl = data['clients'][cname]
         if not isinstance(cl, dict):
             removes.append(cname)
             continue
         cl['cname'] = cname
         ssh_port = cl.get('ssh_port', '')
         if ssh_port:
             ssh_port = '-p {0}'.format(ssh_port)
         cl['rsh_cmd'] = 'ssh {1} {2} {4} {0} {3}'.format(
             '-oStrictHostKeyChecking=no',
             # Set hosts key database path to /dev/null, ie, non-existing
             '-oUserKnownHostsFile=/dev/null',
             # Don't re-use the SSH connection. Less failures.
             '-oControlPath=none',
             ssh_port,
             '-oPreferredAuthentications=publickey')
         cl['rsh_dst'] = '{1}@{0}'.format(cname,
                                          cl.get('ssh_username', 'root'))
         cl['ssh_cmd'] = cl['rsh_cmd'] + '{1}@{0}'.format(
             cname, cl.get('ssh_username', 'root'))
         if 'ssh_gateway' in cl:
             ssh_gateway_key = ''
             if 'ssh_gateway_key' in cl:
                 ssh_gateway_key = '-i {0}'.format(cl['ssh_gateway_key'])
             ssh_gateway = cl['ssh_gateway']
             ssh_gateway_port = ''
             if ':' in ssh_gateway:
                 ssh_gateway, ssh_gateway_port = ssh_gateway.split(':')
             if ssh_gateway_port:
                 ssh_gateway_port = '-p {0}'.format(ssh_gateway_port)
             # Setup ProxyCommand
             proxy_cmd = (
                 ' -oProxyCommand="'
                 'ssh {0} {1} {2} {7} {3} {4}@{5} {6} '
                 'nc -q0 %h %p"').format(
                     '-oStrictHostKeyChecking=no',
                     # Set hosts key database path to /dev/null
                     '-oUserKnownHostsFile=/dev/null',
                     # Don't re-use the SSH connection. Less failures.
                     '-oControlPath=none',
                     ssh_gateway_key,
                     cl.get('ssh_gateway_user', 'root'),
                     ssh_gateway,
                     ssh_gateway_port,
                     '-oPreferredAuthentications=publickey')
             cl['ssh_cmd'] += proxy_cmd
             cl['rsh_cmd'] += proxy_cmd
         cl.setdefault('activated', True)
         cl.setdefault('restore_client', '')
         restore_clients = [
             a for a in cl['restore_client'].split(',') if a.strip()
         ]
         if not data['server_conf']['fqdn'] in restore_clients:
             restore_clients.append(data['server_conf']['fqdn'])
         cl['restore_client'] = ','.join(restore_clients)
         if cl['cname'] == data['server_conf']['fqdn']:
             # backup host is only a client to query backups
             # and restore
             # we usually do not backup the backups locally
             excreg = cl.setdefault('exclude_regex', [])
             if data['server_conf']['directory'] not in excreg:
                 excreg.append('{0}.*'.format(
                     data['server_conf']['directory'].replace('/', '.*')))
             cl.setdefault('activated', True)
             cl['port'] = data['server_conf']['client_port']
             cl['status_port'] = data['server_conf']['client_status_port']
         if not cl['activated']:
             cl['include'] = []
             cl['cross_all_filesystems'] = []
             cl['exclude_regex'] = ['.*']
         cpassk = ('clients_passwords.{0}'.format(cname))
         cpass = local_conf.get(cpassk, secure_password(32))
         local_conf[cpassk] = cpass
         cl['ssl_key_password'] = cpass
         cpassk = ('clients_clipasswords.{0}'.format(cname))
         cpass = local_conf.get(cpassk, secure_password(32))
         local_conf[cpassk] = cpass
         cl['password'] = cpass
         for k, val in data['client_common'].items():
             # spray around the periodicity to spray the backup load
             # all over the hour.
             if k == 'cron_periodicity':
                 # val = None
                 try:
                     val = timers.get(cname, None)
                     tries = val.split()[0].split(',')
                     if not len(tries) == tries_per_hour:
                         val = None
                 except Exception:
                     val = None
                 if not val:
                     per = hour[:]
                     for ix, item in enumerate(per[:]):
                         rand = random.randint(0, (60 / tries_per_hour)) - 1
                         if rand < 0:
                             rand = 0
                         item = item + rand
                         if item >= 60:
                             item = item - 60
                         if item not in per:
                             per[ix] = item
                     per = _s['mc_utils.uniquify'](per)
                     val = '{0} */6 * * *'.format(','.join(
                         ["{0}".format(t) for t in per]))
                 timers[cname] = val
             cl.setdefault(k, val)
     to_delete = [
         a for a in local_conf if a.count('makina-states.local.burp.') >= 1
     ]
     for a in to_delete:
         local_conf.pop(a, None)
     for i in removes:
         data['clients'].pop(i, None)
     _s['mc_macros.update_registry_params']('burp',
                                            local_conf,
                                            registry_format='pack')
     return data
Exemple #11
0
    def _settings():
        """Compute the slapd (OpenLDAP) service settings registry.

        Loads (or generates and persists) the cn=config and root DN
        passwords, builds the set of ``cn=config`` ldif files to manage,
        merges pillar defaults, derives ACLs and syncrepl configuration,
        and writes the passwords back to the local registry.

        Returns the fully-resolved settings dict.
        """
        grains = __grains__
        pillar = __pillar__
        locations = __salt__['mc_locations.settings']()
        # the local registry keeps generated passwords stable across runs
        local_conf = __salt__['mc_macros.get_local_registry'](
            'slapd', registry_format='pack')
        cn_pass = local_conf.setdefault('cn_pass', secure_password(32))
        dn_pass = local_conf.setdefault('dn_pass', secure_password(32))

        # cn=config ldif files managed by this formula; the "{N}" parts are
        # literal OpenLDAP ordering markers, NOT str.format placeholders
        cn_config_files = OrderedDict([
            ('/etc/ldap/slapd.d/cn=config/olcDatabase={1}hdb/'
             'olcOverlay={0}memberof.ldif', {}),
            ('/etc/ldap/slapd.d/cn=config/olcDatabase={1}hdb/'
             'olcOverlay={1}syncprov.ldif', {}),
            ('/etc/ldap/slapd.d/cn=config/'
             'cn=schema.ldif', {}),
            ('/etc/ldap/slapd.d/cn=config/'
             'olcDatabase={1}hdb.ldif', {}),
            ('/etc/ldap/slapd.d/cn=config/'
             'olcDatabase={-1}frontend.ldif', {}),
            ('/etc/ldap/slapd.d/cn=config/'
             'olcDatabase={0}config.ldif', {}),
            ('/etc/default/slapd', {
                'mode': '750'
            }),
            ('/etc/ldap/slapd.d/cn=config/'
             'cn=module{0}.ldif', {}),
        ])
        data = __salt__['mc_utils.defaults'](
            'makina-states.services.dns.slapd',
            {
                'slapd_directory': "/etc/ldap/slapd.d",
                'extra_dirs': [
                    '/etc/ldap',
                    '/var/lib/ldap',
                ],
                'fd_ver': '1.0.9.1',
                'mode': 'master',
                'writer_groups': ['ldapwriters'],
                'reader_groups': ['ldapreaders'],
                'admin_groups_acls': '',
                'pkgs': ['ldap-utils', 'ca-certificates',
                         'slapd', 'python-ldap'],
                'user': '******',
                'group': 'openldap',
                'service_name': 'slapd',
                'SLAPD_CONF': '/etc/ldap/slapd.d',
                'SLAPD_PIDFILE': '',
                'SLAPD_SERVICES': 'ldaps:/// ldap:/// ldapi:///',
                'SLAPD_NO_START': '',
                'SLAPD_SENTINEL_FILE': '/etc/ldap/noslapd',
                'SLAPD_OPTIONS': '',
                'init_ldif': 'salt://makina-states/files/etc/ldap/init.ldif',
                'config_dn': 'cn=config',
                'config_cn': 'config',
                'cn_config_files': cn_config_files,
                'config_rootdn': 'cn=admin,cn=config',
                'config_pw': cn_pass,
                'econfig_pw': '',
                'group_ou': 'Group',
                'dn': 'dc=sample,dc=com',
                'verify_client': 'never',
                'root_dn': None,
                'root_pw': dn_pass,
                'eroot_pw': '',
                'loglevel': 'sync',
                'non_anonymous': True,
                'syncprov': True,
                'syncrepl': OrderedDict([
                    ('starttls', 'yes'),
                    ('tls_reqcert', 'allow'),
                    ('timeout', 3),
                    # ('attrs', '*,+'),
                    ('scope', 'sub'),
                    ('retry', '5 5 5 +'),
                    ('sizelimit', 'unlimited'),
                    ('type', 'refreshAndPersist'),
                    ('interval', '00:00:04:00')
                ]),
                'olcloglevel': 'sync',
                'cert_domain': grains['id'],
                'acls': [],
                'acls_schema': default_acl_schema,
                'master_uri': '',
                'default_schema': True,
                'schemas': [],
                'fd_schema': True
            })
        data['syncrepl'].setdefault('searchbase', data['dn'])
        # persist the (possibly pillar-overridden) passwords back into the
        # local registry so they survive across highstate runs
        local_conf['cn_pass'] = data['config_pw']
        local_conf['dn_pass'] = data['root_pw']
        # derive the encoded variants from the clear passwords when unset
        for k in ['eroot_pw', 'econfig_pw']:
            if not data[k]:
                data[k] = sha_pw(data[k[1:]])
        if not data['root_dn']:
            data['root_dn'] = 'cn=admin,{0}'.format(data['dn'])
        schemas = data['schemas']
        cn_config_files = data['cn_config_files']
        if data['default_schema']:
            for i in [
                '/etc/ldap/slapd.d/cn=config.ldif',
                '/etc/ldap/slapd.d/cn=config/cn=schema/cn={0}core.ldif',
                '/etc/ldap/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif',
                ('/etc/ldap/slapd.d/cn=config/'
                 'cn=schema/cn={2}inetorgperson.ldif'),
                '/etc/ldap/slapd.d/cn=config/cn=schema/cn={3}misc.ldif',
                ('/etc/ldap/slapd.d/cn=config/'
                 'cn=schema/cn={4}nis.ldif'),
            ]:
                # when the fd schemas are enabled, they ship their own
                # core schema ldifs; skip the default per-schema files
                if ('cn=schema/cn=' in i) and data['fd_schema']:
                    continue
                if i not in schemas:
                    schemas.append(i)
                if i not in cn_config_files:
                    cn_config_files[i] = {}
        for mode, key in OrderedDict([('writer', 'manage'), (
                'reader',
                'read',
        )]).items():
            for group in data['{0}_groups'.format(mode)]:
                match = 'cn={0},'.format(group)
                # avoid appending the same group clause twice
                if match in data['admin_groups_acls']:
                    continue
                data['admin_groups_acls'] += (
                    " by group.exact=\"cn={0},ou={data[group_ou]},{data[dn]}\" {1}"
                ).format(group, key, data=data)
        if data['non_anonymous']:
            # downgrade any explicit anonymous grant to "none", and insert
            # "by anonymous auth" before bare catch-all "by *" clauses
            for ix in range(len(data['acls_schema'])):
                acl = data['acls_schema'][ix]
                if 'by anonymous' in acl:
                    # BUGFIX: str.replace returns a new string; the result
                    # was previously discarded, making this loop a no-op
                    for i in ['read', 'write', 'none', 'auth']:
                        acl = acl.replace('by anonymous {0}'.format(i),
                                          'by anonymous none')
                    data['acls_schema'][ix] = acl
                elif 'by *' in acl and 'anonymous' not in acl:
                    acl = acl.replace('by *', 'by anonymous auth by *')
                    data['acls_schema'][ix] = acl
        if not data['acls']:
            # render the acl templates against the resolved settings
            acls = []
            for a in data['acls_schema'][:]:
                acl = a.format(data=data)
                acls.append(acl)
            data['acls'] = acls
        s_aclchema = ''
        if data['acls']:
            s_aclchema = encode_ldap('olcAccess', data['acls'])
        data['s_aclchema'] = s_aclchema
        # fd schema ldifs ({22}samba .. {38}pgp-remte-prefs) are now
        # deployed via file.recurse instead of being listed here
        srepl = ''
        keys = [a for a in data['syncrepl']]
        keys.sort(key=order_syncrepl)
        # only emit syncrepl settings when a replication provider is set
        if data['syncrepl'].get('provider', ''):
            for k in keys:
                val = data['syncrepl'][k]
                srepl += ' {0}={1}'.format(k, sync_ldap_quote(k, val))
                srepl = srepl.strip()
                data['c_syncrepl'] = srepl
            data['s_syncrepl'] = encode_ldap("olcSyncrepl", srepl)
        __salt__['mc_macros.update_registry_params']('slapd',
                                                     local_conf,
                                                     registry_format='pack')
        # schema ldif sources live under a per-version (fd_ver) directory
        for cfg in data['cn_config_files']:
            cdata = data['cn_config_files'][cfg]
            cdata.setdefault(
                'source', "salt://makina-states/files{0}".format(
                    cfg.replace(
                        'slapd.d/cn=config/cn=schema/',
                        'slapd.d/cn=config/cn=schema/{0}/'.format(
                            data['fd_ver']))))
        return data
Exemple #12
0
def salt_pw(pw):
    """Return an SSHA-style hash of *pw* using an 8-char random salt."""
    rand = secure_password(8)
    digest = hashlib.sha1(pw)
    digest.update(rand)
    hashed = digest.digest() + rand
    return encode("{SSHA}" + encode(hashed))
Exemple #13
0
def generate_password(length=None):
    """Return a freshly generated password of *length* characters.

    When *length* is None, secure_password's own default size is used.
    """
    password = secure_password(length)
    return password
Exemple #14
0
def create(vm_):
    '''
    Create a single VM from a data dict

    Validates the ssh key configuration, emits salt-cloud lifecycle
    events, requests (or adopts) the instance, waits until a usable IP
    address is available and bootstraps salt onto the node.

    Returns the bootstrap result dict merged with the provider node
    data, with any plaintext password removed.
    '''
    deploy = config.get_cloud_config_value('deploy', vm_, __opts__)
    key_filename = config.get_cloud_config_value(
        'ssh_key_file', vm_, __opts__, search_global=False, default=None
    )
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_key_file {0!r} does not exist'.format(
                key_filename
            )
        )

    # deploying without a key requires sshpass for password authentication
    if deploy is True and key_filename is None and \
            salt.utils.which('sshpass') is None:
        raise SaltCloudSystemExit(
            'Cannot deploy salt in a VM if the \'ssh_key_file\' setting '
            'is not set and \'sshpass\' binary is not present on the '
            'system for the password.'
        )

    vm_['key_filename'] = key_filename

    # announce creation start on the salt event bus
    salt.utils.cloud.fire_event(
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
        transport=__opts__['transport']
    )
    conn = get_conn()

    if 'instance_id' in vm_:
        # This was probably created via another process, and doesn't have
        # things like salt keys created yet, so let's create them now.
        if 'pub_key' not in vm_ and 'priv_key' not in vm_:
            log.debug('Generating minion keys for {0[name]!r}'.format(vm_))
            vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value(
                    'keysize',
                    vm_,
                    __opts__
                )
            )
        data = conn.server_show_libcloud(vm_['instance_id'])
        # without a key file, set a fresh root password so that password
        # authentication (sshpass) can be used for the deploy
        if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True:
            vm_['password'] = sup.secure_password()
            conn.root_password(vm_['instance_id'], vm_['password'])
    else:
        # Put together all of the information required to request the instance,
        # and then fire off the request for it
        data, vm_ = request_instance(vm_)

        # Pull the instance ID, valid for both spot and normal instances
        vm_['instance_id'] = data.id

    def __query_node_data(vm_, data):
        # Polled by wait_for_ip: returns the node data once a usable IP
        # exists, None to keep waiting, False to abort the wait loop.
        try:
            node = show_instance(vm_['name'], 'action')
            log.debug(
                'Loaded node data for {0}:\n{1}'.format(
                    vm_['name'],
                    pprint.pformat(node)
                )
            )
        except Exception as err:
            log.error(
                'Failed to get nodes list: {0}'.format(
                    err
                ),
                # Show the traceback if the debug logging level is enabled
                exc_info_on_loglevel=logging.DEBUG
            )
            # Trigger a failure in the wait for IP function
            return False

        running = node['state'] == 'ACTIVE'
        if not running:
            # Still not running, trigger another iteration
            return

        if rackconnect(vm_) is True:
            extra = node.get('extra', {})
            rc_status = extra.get('metadata', {}).get(
                'rackconnect_automation_status', '')
            access_ip = extra.get('access_ip', '')

            if rc_status != 'DEPLOYED':
                log.debug('Waiting for Rackconnect automation to complete')
                return

        if managedcloud(vm_) is True:
            extra = conn.server_show_libcloud(
                node['id']
            ).extra
            mc_status = extra.get('metadata', {}).get(
                'rax_service_level_automation', '')

            if mc_status != 'Complete':
                log.debug('Waiting for managed cloud automation to complete')
                return

        result = []

        # fall back to the access_ip when no ip lists were reported at all
        if 'private_ips' not in node and 'public_ips' not in node and \
           'access_ip' in node.get('extra', {}):
            result = [node['extra']['access_ip']]

        private = node.get('private_ips', [])
        public = node.get('public_ips', [])
        if private and not public:
            log.warn(
                'Private IPs returned, but not public... Checking for '
                'misidentified IPs'
            )
            for private_ip in private:
                private_ip = preferred_ip(vm_, [private_ip])
                if salt.utils.cloud.is_public_ip(private_ip):
                    log.warn('{0} is a public IP'.format(private_ip))
                    data.public_ips.append(private_ip)
                    log.warn(
                        (
                            'Public IP address was not ready when we last'
                            ' checked.  Appending public IP address now.'
                        )
                    )
                    public = data.public_ips
                else:
                    log.warn('{0} is a private IP'.format(private_ip))
                    ignore_ip = ignore_cidr(vm_, private_ip)
                    if private_ip not in data.private_ips and not ignore_ip:
                        result.append(private_ip)

        if rackconnect(vm_) is True:
            if ssh_interface(vm_) != 'private_ips':
                data.public_ips = access_ip
                return data

        if result:
            log.debug('result = {0}'.format(result))
            data.private_ips = result
            if ssh_interface(vm_) == 'private_ips':
                return data

        if public:
            data.public_ips = public
            if ssh_interface(vm_) != 'private_ips':
                return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_, data),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            # BUGFIX: BaseException.message does not exist on Python 3
            # (deprecated since 2.6); use str(exc) instead
            raise SaltCloudSystemExit(str(exc))

    log.debug('VM is now running')

    # choose the ssh target address according to the configured interface
    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    elif rackconnect(vm_) is True and ssh_interface(vm_) != 'private_ips':
        ip_address = data.public_ips
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address {0}'.format(ip_address))

    if not ip_address:
        raise SaltCloudSystemExit('A valid IP address was not found')

    vm_['ssh_host'] = ip_address
    ret = salt.utils.cloud.bootstrap(vm_, __opts__)

    ret.update(data.__dict__)

    # never leak the plaintext password in the returned data
    if 'password' in ret['extra']:
        del ret['extra']['password']

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug(
        '{0[name]!r} VM creation details:\n{1}'.format(
            vm_, pprint.pformat(data)
        )
    )

    salt.utils.cloud.fire_event(
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
        transport=__opts__['transport']
    )

    return ret
def create(vm_):
    """
    Create a single VM from a data dict

    Validates the configured ssh key, fires salt-cloud lifecycle events,
    requests (or adopts) the instance, waits for a usable IP address and
    bootstraps salt onto the node.

    Returns the bootstrap result dict merged with the provider node data,
    with any plaintext password stripped.
    """
    # NOTE(review): `deploy` is read but never used here; the sibling
    # implementation above also guards on sshpass availability when
    # deploying without a key -- confirm whether dropping that check
    # was intentional.
    deploy = config.get_cloud_config_value("deploy", vm_, __opts__)
    key_filename = config.get_cloud_config_value("ssh_key_file", vm_, __opts__, search_global=False, default=None)
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError("The defined ssh_key_file {0!r} does not exist".format(key_filename))

    vm_["key_filename"] = key_filename

    # announce creation start on the salt event bus
    salt.utils.cloud.fire_event(
        "event",
        "starting create",
        "salt/cloud/{0}/creating".format(vm_["name"]),
        {"name": vm_["name"], "profile": vm_["profile"], "provider": vm_["provider"]},
        transport=__opts__["transport"],
    )
    conn = get_conn()

    if "instance_id" in vm_:
        # This was probably created via another process, and doesn't have
        # things like salt keys created yet, so let's create them now.
        if "pub_key" not in vm_ and "priv_key" not in vm_:
            log.debug("Generating minion keys for {0[name]!r}".format(vm_))
            vm_["priv_key"], vm_["pub_key"] = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value("keysize", vm_, __opts__)
            )
        data = conn.server_show_libcloud(vm_["instance_id"])
        # without a key file, set a fresh root password so that password
        # authentication can be used for the deploy
        if vm_["key_filename"] is None and "change_password" in __opts__ and __opts__["change_password"] is True:
            vm_["password"] = sup.secure_password()
            conn.root_password(vm_["instance_id"], vm_["password"])
    else:
        # Put together all of the information required to request the instance,
        # and then fire off the request for it
        data, vm_ = request_instance(vm_)

        # Pull the instance ID, valid for both spot and normal instances
        vm_["instance_id"] = data.id

    def __query_node_data(vm_, data):
        # Polled by wait_for_ip: returns the node data once a usable IP
        # exists, None to keep waiting, False to abort the wait loop.
        try:
            node = show_instance(vm_["name"], "action")
            log.debug("Loaded node data for {0}:\n{1}".format(vm_["name"], pprint.pformat(node)))
        except Exception as err:
            log.error(
                "Failed to get nodes list: {0}".format(err),
                # Show the traceback if the debug logging level is enabled
                exc_info_on_loglevel=logging.DEBUG,
            )
            # Trigger a failure in the wait for IP function
            return False

        running = node["state"] == "ACTIVE"
        if not running:
            # Still not running, trigger another iteration
            return

        if rackconnect(vm_) is True:
            extra = node.get("extra", {})
            rc_status = extra.get("metadata", {}).get("rackconnect_automation_status", "")
            access_ip = extra.get("access_ip", "")

            if rc_status != "DEPLOYED":
                log.debug("Waiting for Rackconnect automation to complete")
                return

        if managedcloud(vm_) is True:
            extra = conn.server_show_libcloud(node["id"]).extra
            mc_status = extra.get("metadata", {}).get("rax_service_level_automation", "")

            if mc_status != "Complete":
                log.debug("Waiting for managed cloud automation to complete")
                return

        result = []

        # fall back to the access_ip when no ip lists were reported at all
        if "private_ips" not in node and "public_ips" not in node and "access_ip" in node.get("extra", {}):
            result = [node["extra"]["access_ip"]]

        private = node.get("private_ips", [])
        public = node.get("public_ips", [])
        if private and not public:
            # some providers report public addresses in the private list;
            # reclassify them before deciding which address to use
            log.warn("Private IPs returned, but not public... Checking for " "misidentified IPs")
            for private_ip in private:
                private_ip = preferred_ip(vm_, [private_ip])
                if salt.utils.cloud.is_public_ip(private_ip):
                    log.warn("{0} is a public IP".format(private_ip))
                    data.public_ips.append(private_ip)
                    log.warn(
                        ("Public IP address was not ready when we last" " checked.  Appending public IP address now.")
                    )
                    public = data.public_ips
                else:
                    log.warn("{0} is a private IP".format(private_ip))
                    ignore_ip = ignore_cidr(vm_, private_ip)
                    if private_ip not in data.private_ips and not ignore_ip:
                        result.append(private_ip)

        if rackconnect(vm_) is True:
            if ssh_interface(vm_) != "private_ips":
                data.public_ips = access_ip
                return data

        if result:
            log.debug("result = {0}".format(result))
            data.private_ips = result
            if ssh_interface(vm_) == "private_ips":
                return data

        if public:
            data.public_ips = public
            if ssh_interface(vm_) != "private_ips":
                return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_, data),
            timeout=config.get_cloud_config_value("wait_for_ip_timeout", vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value("wait_for_ip_interval", vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_["name"])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc))

    log.debug("VM is now running")

    # choose the ssh target address according to the configured interface
    if ssh_interface(vm_) == "private_ips":
        ip_address = preferred_ip(vm_, data.private_ips)
    elif rackconnect(vm_) is True and ssh_interface(vm_) != "private_ips":
        ip_address = data.public_ips
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug("Using IP address {0}".format(ip_address))

    if not ip_address:
        raise SaltCloudSystemExit("A valid IP address was not found")

    vm_["ssh_host"] = ip_address
    ret = salt.utils.cloud.bootstrap(vm_, __opts__)

    ret.update(data.__dict__)

    # never leak the plaintext password in the returned data
    if "password" in ret["extra"]:
        del ret["extra"]["password"]

    log.info("Created Cloud VM {0[name]!r}".format(vm_))
    log.debug("{0[name]!r} VM creation details:\n{1}".format(vm_, pprint.pformat(data)))

    salt.utils.cloud.fire_event(
        "event",
        "created instance",
        "salt/cloud/{0}/created".format(vm_["name"]),
        {"name": vm_["name"], "profile": vm_["profile"], "provider": vm_["provider"]},
        transport=__opts__["transport"],
    )

    return ret
Exemple #16
0
def create(vm_):
    '''
    Create a single VM from a data dict.

    vm_
        VM configuration dict; must at least carry ``name``, ``profile``
        and ``provider`` entries.

    Returns the bootstrap result dict merged with the node's data, or
    raises ``SaltCloudConfigError`` / ``SaltCloudSystemExit`` on failure.
    '''
    deploy = config.get_cloud_config_value('deploy', vm_, __opts__)
    key_filename = config.get_cloud_config_value('ssh_key_file',
                                                 vm_,
                                                 __opts__,
                                                 search_global=False,
                                                 default=None)
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_key_file {0!r} does not exist'.format(
                key_filename))

    # Without a key file the deploy falls back to password auth, which
    # needs the 'sshpass' binary for non-interactive ssh.
    if deploy is True and key_filename is None and \
            salt.utils.which('sshpass') is None:
        raise SaltCloudSystemExit(
            'Cannot deploy salt in a VM if the \'ssh_key_file\' setting '
            'is not set and \'sshpass\' binary is not present on the '
            'system for the password.')

    vm_['key_filename'] = key_filename

    salt.utils.cloud.fire_event('event',
                                'starting create',
                                'salt/cloud/{0}/creating'.format(vm_['name']),
                                {
                                    'name': vm_['name'],
                                    'profile': vm_['profile'],
                                    'provider': vm_['provider'],
                                },
                                transport=__opts__['transport'])
    conn = get_conn()

    if 'instance_id' in vm_:
        # This was probably created via another process, and doesn't have
        # things like salt keys created yet, so let's create them now.
        if 'pub_key' not in vm_ and 'priv_key' not in vm_:
            log.debug('Generating minion keys for {0[name]!r}'.format(vm_))
            vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value('keysize', vm_, __opts__))
        data = conn.server_show_libcloud(vm_['instance_id'])
        if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__[
                'change_password'] is True:
            vm_['password'] = sup.secure_password()
            conn.root_password(vm_['instance_id'], vm_['password'])
    else:
        # Put together all of the information required to request the instance,
        # and then fire off the request for it
        data, vm_ = request_instance(vm_)

        # Pull the instance ID, valid for both spot and normal instances
        vm_['instance_id'] = data.id

    def __query_node_data(vm_, data):
        # wait_for_ip() poll helper: return the updated ``data`` once a
        # usable IP shows up, False on a hard failure, None to poll again.
        try:
            node = show_instance(vm_['name'], 'action')
            log.debug('Loaded node data for {0}:\n{1}'.format(
                vm_['name'], pprint.pformat(node)))
        except Exception as err:
            log.error(
                'Failed to get nodes list: {0}'.format(err),
                # Show the traceback if the debug logging level is enabled
                exc_info_on_loglevel=logging.DEBUG)
            # Trigger a failure in the wait for IP function
            return False

        running = node['state'] == 'ACTIVE'
        if not running:
            # Still not running, trigger another iteration
            return

        if rackconnect(vm_) is True:
            extra = node.get('extra', {})
            rc_status = extra.get('metadata',
                                  {}).get('rackconnect_automation_status', '')
            access_ip = extra.get('access_ip', '')

            if rc_status != 'DEPLOYED':
                log.debug('Waiting for Rackconnect automation to complete')
                return

        if managedcloud(vm_) is True:
            extra = conn.server_show_libcloud(node['id']).extra
            mc_status = extra.get('metadata',
                                  {}).get('rax_service_level_automation', '')

            if mc_status != 'Complete':
                log.debug('Waiting for managed cloud automation to complete')
                return

        result = []

        # Some providers expose only an access_ip in the 'extra' data.
        if 'private_ips' not in node and 'public_ips' not in node and \
           'access_ip' in node.get('extra', {}):
            result = [node['extra']['access_ip']]

        private = node.get('private_ips', [])
        public = node.get('public_ips', [])
        if private and not public:
            log.warning('Private IPs returned, but not public... Checking for '
                        'misidentified IPs')
            for private_ip in private:
                private_ip = preferred_ip(vm_, [private_ip])
                if salt.utils.cloud.is_public_ip(private_ip):
                    log.warning('{0} is a public IP'.format(private_ip))
                    data.public_ips.append(private_ip)
                    log.warning(('Public IP address was not ready when we last'
                                 ' checked.  Appending public IP address now.'))
                    public = data.public_ips
                else:
                    log.warning('{0} is a private IP'.format(private_ip))
                    ignore_ip = ignore_cidr(vm_, private_ip)
                    if private_ip not in data.private_ips and not ignore_ip:
                        result.append(private_ip)

        if rackconnect(vm_) is True:
            # Rackconnect routes inbound traffic through its access IP, so
            # prefer it unless the user explicitly asked for private_ips.
            if ssh_interface(vm_) != 'private_ips':
                data.public_ips = access_ip
                return data

        if result:
            log.debug('result = {0}'.format(result))
            data.private_ips = result
            if ssh_interface(vm_) == 'private_ips':
                return data

        if public:
            data.public_ips = public
            if ssh_interface(vm_) != 'private_ips':
                return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_, data),
            timeout=config.get_cloud_config_value('wait_for_ip_timeout',
                                                  vm_,
                                                  __opts__,
                                                  default=10 * 60),
            interval=config.get_cloud_config_value('wait_for_ip_interval',
                                                   vm_,
                                                   __opts__,
                                                   default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            # FIX: exceptions have no '.message' attribute on Python 3
            # (PEP 352); use str(exc) like the other driver variants.
            raise SaltCloudSystemExit(str(exc))

    log.debug('VM is now running')

    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    elif rackconnect(vm_) is True and ssh_interface(vm_) != 'private_ips':
        ip_address = data.public_ips
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address {0}'.format(ip_address))

    # The salt interface may legitimately differ from the ssh interface
    # (e.g. minions reach the master over a private network).
    if get_salt_interface(vm_) == 'private_ips':
        salt_ip_address = preferred_ip(vm_, data.private_ips)
        log.info('Salt interface set to: {0}'.format(salt_ip_address))
    elif rackconnect(vm_) is True and get_salt_interface(vm_) != 'private_ips':
        salt_ip_address = data.public_ips
    else:
        salt_ip_address = preferred_ip(vm_, data.public_ips)
        log.debug('Salt interface set to: {0}'.format(salt_ip_address))

    if not ip_address:
        raise SaltCloudSystemExit('A valid IP address was not found')

    vm_['ssh_host'] = ip_address
    vm_['salt_host'] = salt_ip_address

    ret = salt.utils.cloud.bootstrap(vm_, __opts__)

    ret.update(data.__dict__)

    # Never leak the admin password into the returned/cached data.
    if 'password' in ret['extra']:
        del ret['extra']['password']

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug('{0[name]!r} VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data)))

    salt.utils.cloud.fire_event('event',
                                'created instance',
                                'salt/cloud/{0}/created'.format(vm_['name']), {
                                    'name': vm_['name'],
                                    'profile': vm_['profile'],
                                    'provider': vm_['provider'],
                                },
                                transport=__opts__['transport'])

    return ret
Exemple #17
0
def create(vm_):
    '''
    Create a single VM from a data dict.

    Validates the profile, reuses an existing ``instance_id`` when one is
    supplied, waits for a usable IP (public, private, fixed or floating,
    including Rackconnect v2/v3 and cloud-network setups), then bootstraps
    salt onto the node.

    Returns the bootstrap result dict merged with the node data, ``False``
    when the profile is misconfigured, or raises ``SaltCloudConfigError`` /
    ``SaltCloudSystemExit`` on failure.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__,
                __active_provider_name__ or 'nova',
                vm_['profile'],
                vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    key_filename = config.get_cloud_config_value('ssh_key_file',
                                                 vm_,
                                                 __opts__,
                                                 search_global=False,
                                                 default=None)
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_key_file \'{0}\' does not exist'.format(
                key_filename))

    vm_['key_filename'] = key_filename

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])
    conn = get_conn()

    if 'instance_id' in vm_:
        # This was probably created via another process, and doesn't have
        # things like salt keys created yet, so let's create them now.
        if 'pub_key' not in vm_ and 'priv_key' not in vm_:
            log.debug('Generating minion keys for \'{0[name]}\''.format(vm_))
            vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value('keysize', vm_, __opts__))
        data = conn.server_show_libcloud(vm_['instance_id'])
        if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__[
                'change_password'] is True:
            vm_['password'] = sup.secure_password()
            conn.root_password(vm_['instance_id'], vm_['password'])
    else:
        # Put together all of the information required to request the instance,
        # and then fire off the request for it
        data, vm_ = request_instance(vm_)

        # Pull the instance ID, valid for both spot and normal instances
        vm_['instance_id'] = data.id

    def __query_node_data(vm_, data):
        # wait_for_ip() poll helper: return the updated ``data`` once a
        # usable IP shows up, False on a hard failure, None to poll again.
        try:
            node = show_instance(vm_['name'], 'action')
            log.debug('Loaded node data for {0}:\n{1}'.format(
                vm_['name'], pprint.pformat(node)))
        except Exception as err:
            log.error(
                'Failed to get nodes list: {0}'.format(err),
                # Show the traceback if the debug logging level is enabled
                exc_info_on_loglevel=logging.DEBUG)
            # Trigger a failure in the wait for IP function
            return False

        running = node['state'] == 'ACTIVE'
        if not running:
            # Still not running, trigger another iteration
            return

        if rackconnect(vm_) is True:
            extra = node.get('extra', {})
            rc_status = extra.get('metadata',
                                  {}).get('rackconnect_automation_status', '')
            if rc_status != 'DEPLOYED':
                log.debug('Waiting for Rackconnect automation to complete')
                return

        if managedcloud(vm_) is True:
            extra = conn.server_show_libcloud(node['id']).extra
            mc_status = extra.get('metadata',
                                  {}).get('rax_service_level_automation', '')

            if mc_status != 'Complete':
                log.debug('Waiting for managed cloud automation to complete')
                return

        access_ip = node.get('extra', {}).get('access_ip', '')

        rcv3 = rackconnectv3(vm_) in node['addresses']
        sshif = ssh_interface(vm_) in node['addresses']

        if any((rcv3, sshif)):
            networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_)
            for network in node['addresses'].get(networkname, []):
                # FIX: 'version' is an int; ``is 4`` compared identity and
                # only worked via CPython small-int caching (SyntaxWarning
                # on 3.8+).  Use value equality.
                if network['version'] == 4:
                    access_ip = network['addr']
                    break
            vm_['cloudnetwork'] = True

        # Conditions to pass this
        #
        #     Rackconnect v2: vm_['rackconnect'] = True
        #         If this is True, then the server will not be accessible from the ipv4 addres in public_ips.
        #         That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the
        #         server.  In this case we can use the private_ips for ssh_interface, or the access_ip.
        #
        #     Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork>
        #         If this is the case, salt will need to use the cloud network to login to the server.  There
        #         is no ipv4 address automatically provisioned for these servers when they are booted.  SaltCloud
        #         also cannot use the private_ips, because that traffic is dropped at the hypervisor.
        #
        #     CloudNetwork: vm['cloudnetwork'] = True
        #         If this is True, then we should have an access_ip at this point set to the ip on the cloud
        #         network.  If that network does not exist in the 'addresses' dictionary, then SaltCloud will
        #         use the initial access_ip, and not overwrite anything.

        if any((cloudnetwork(vm_),
                rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips'
                                        or rcv3) and access_ip != '':
            data.public_ips = [
                access_ip,
            ]
            return data

        result = []

        # Some providers expose only an access_ip in the 'extra' data.
        if 'private_ips' not in node and 'public_ips' not in node and \
           'floating_ips' not in node and 'fixed_ips' not in node and \
           'access_ip' in node.get('extra', {}):
            result = [node['extra']['access_ip']]

        private = node.get('private_ips', [])
        public = node.get('public_ips', [])
        fixed = node.get('fixed_ips', [])
        floating = node.get('floating_ips', [])

        if private and not public:
            log.warning('Private IPs returned, but not public... Checking for '
                        'misidentified IPs')
            for private_ip in private:
                private_ip = preferred_ip(vm_, [private_ip])
                if private_ip is False:
                    continue
                if salt.utils.cloud.is_public_ip(private_ip):
                    log.warning('{0} is a public IP'.format(private_ip))
                    data.public_ips.append(private_ip)
                    log.warning(
                        ('Public IP address was not ready when we last'
                         ' checked.  Appending public IP address now.'))
                    public = data.public_ips
                else:
                    log.warning('{0} is a private IP'.format(private_ip))
                    ignore_ip = ignore_cidr(vm_, private_ip)
                    if private_ip not in data.private_ips and not ignore_ip:
                        result.append(private_ip)

        # populate return data with private_ips
        # when ssh_interface is set to private_ips and public_ips exist
        if not result and ssh_interface(vm_) == 'private_ips':
            for private_ip in private:
                ignore_ip = ignore_cidr(vm_, private_ip)
                if private_ip not in data.private_ips and not ignore_ip:
                    result.append(private_ip)

        non_private_ips = []

        if public:
            data.public_ips = public
            if ssh_interface(vm_) == 'public_ips':
                non_private_ips.append(public)

        if floating:
            data.floating_ips = floating
            if ssh_interface(vm_) == 'floating_ips':
                non_private_ips.append(floating)

        if fixed:
            data.fixed_ips = fixed
            if ssh_interface(vm_) == 'fixed_ips':
                non_private_ips.append(fixed)

        if non_private_ips:
            log.debug('result = {0}'.format(non_private_ips))
            data.private_ips = result
            if ssh_interface(vm_) != 'private_ips':
                return data

        if result:
            log.debug('result = {0}'.format(result))
            data.private_ips = result
            if ssh_interface(vm_) == 'private_ips':
                return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_, data),
            timeout=config.get_cloud_config_value('wait_for_ip_timeout',
                                                  vm_,
                                                  __opts__,
                                                  default=10 * 60),
            interval=config.get_cloud_config_value('wait_for_ip_interval',
                                                   vm_,
                                                   __opts__,
                                                   default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc))

    log.debug('VM is now running')

    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    elif ssh_interface(vm_) == 'fixed_ips':
        ip_address = preferred_ip(vm_, data.fixed_ips)
    elif ssh_interface(vm_) == 'floating_ips':
        ip_address = preferred_ip(vm_, data.floating_ips)
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address {0}'.format(ip_address))

    # The salt interface may legitimately differ from the ssh interface
    # (e.g. minions reach the master over a private network).
    if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips':
        salt_ip_address = preferred_ip(vm_, data.private_ips)
        log.info('Salt interface set to: {0}'.format(salt_ip_address))
    elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips':
        salt_ip_address = preferred_ip(vm_, data.fixed_ips)
        log.info('Salt interface set to: {0}'.format(salt_ip_address))
    elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips':
        salt_ip_address = preferred_ip(vm_, data.floating_ips)
        log.info('Salt interface set to: {0}'.format(salt_ip_address))
    else:
        salt_ip_address = preferred_ip(vm_, data.public_ips)
        log.debug('Salt interface set to: {0}'.format(salt_ip_address))

    if not ip_address:
        raise SaltCloudSystemExit('A valid IP address was not found')

    vm_['ssh_host'] = ip_address
    vm_['salt_host'] = salt_ip_address

    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data.__dict__)

    # Never leak the admin password into the returned/cached data.
    if 'password' in ret['extra']:
        del ret['extra']['password']

    log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
    log.debug('\'{0[name]}\' VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data.__dict__)))

    event_data = {
        'name': vm_['name'],
        'profile': vm_['profile'],
        'provider': vm_['driver'],
        'instance_id': vm_['instance_id'],
        'floating_ips': data.floating_ips,
        'fixed_ips': data.fixed_ips,
        'private_ips': data.private_ips,
        'public_ips': data.public_ips
    }

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', event_data,
                                             event_data.keys()),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])
    __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova',
                                          vm_['driver'])
    return ret
Exemple #18
0
    def _settings():
        """Compute the slapd (OpenLDAP) service settings.

        Generates (or reloads) the config/root passwords from the local
        registry, merges pillar overrides via ``mc_utils.defaults``,
        completes the list of cn=config LDIF files for the default and
        FusionDirectory schemas, expands the ACL templates and persists
        the passwords back to the registry.  Returns the settings dict.
        """
        grains = __grains__
        # NOTE(review): ``pillar`` and ``locations`` are never used below;
        # kept because mc_locations.settings() may warm project caches —
        # confirm before removing.
        pillar = __pillar__
        locations = __salt__['mc_locations.settings']()
        local_conf = __salt__['mc_macros.get_local_registry'](
            'slapd', registry_format='pack')
        # setdefault(): generate passwords once, then reuse the stored ones.
        cn_pass = local_conf.setdefault('cn_pass', secure_password(32))
        dn_pass = local_conf.setdefault('dn_pass', secure_password(32))

        cn_config_files = [
            ('/etc/ldap/slapd.d/cn=config/olcDatabase={1}hdb/'
             'olcOverlay={0}memberof.ldif'),
            ('/etc/ldap/slapd.d/cn=config/olcDatabase={1}hdb/'
             'olcOverlay={1}syncprov.ldif'),
            '/etc/ldap/slapd.d/cn=config/cn=schema.ldif',
            '/etc/ldap/slapd.d/cn=config/olcDatabase={1}hdb.ldif',
            #'/etc/ldap/slapd.d/cn=config/olcDatabase={-1}frontend.ldif',
            '/etc/ldap/slapd.d/cn=config/olcDatabase={0}config.ldif',
            '/etc/ldap/slapd.d/cn=config/cn=module{0}.ldif',
        ]
        slapdData = __salt__['mc_utils.defaults'](
            'makina-states.services.dns.slapd', {
                'slapd_directory': "/etc/ldap/slapd.d",
                'extra_dirs': [
                    '/etc/ldap',
                    '/var/lib/ldap',
                ],
                # FIX: removed the duplicate dead 'cn_config_files': []
                # entry that was silently overridden later in this literal.
                'mode': 'master',
                'pkgs': ['ldap-utils', 'ca-certificates',
                         'slapd', 'python-ldap'],
                # NOTE(review): value looks redacted ('******'); the slapd
                # system user is normally 'openldap' — confirm.
                'user': '******',
                'group': 'openldap',
                'service_name': 'slapd',
                'SLAPD_CONF': '/etc/ldap/slapd.d',
                'SLAPD_PIDFILE': '',
                'SLAPD_SERVICES': 'ldaps:/// ldap:/// ldapi:///',
                'SLAPD_NO_START': "",
                'SLAPD_SENTINEL_FILE': '/etc/ldap/noslapd',
                'SLAPD_OPTIONS': '',
                'init_ldif': 'salt://makina-states/files/etc/ldap/init.ldif',
                'config_dn': 'cn=config',
                'config_cn': 'config',
                'cn_config_files': cn_config_files,
                'config_rootdn': 'cn=admin,cn=config',
                'config_pw': cn_pass,
                'econfig_pw': '',
                'dn': 'dc=sample,dc=com',
                'verify_client': 'never',
                'root_dn': None,
                'root_pw': dn_pass,
                'eroot_pw': '',
                'loglevel': 'sync',
                'syncprov': True,
                'syncrepl': None,
                'olcloglevel': "sync",
                'tls_cacert': '',
                'tls_cert': '',
                'tls_key': '',
                'acls': [],
                'acls_schema': default_acl_schema,
                'master_uri': '',
                'cert_domain': grains['id'],
                'default_schema': True,
                'fd_schema': True,
            })
        # Persist the (possibly pillar-overridden) passwords back to the
        # local registry so subsequent runs reuse them.
        local_conf['cn_pass'] = slapdData['config_pw']
        local_conf['dn_pass'] = slapdData['root_pw']
        # Derive the hashed ('e'ncoded) passwords when not provided.
        for k in ['eroot_pw', 'econfig_pw']:
            if not slapdData[k]:
                slapdData[k] = sha_pw(slapdData[k[1:]])
        if not slapdData['root_dn']:
            slapdData['root_dn'] = 'cn=admin,{0}'.format(slapdData['dn'])
        cn_config_files = slapdData['cn_config_files']
        if not slapdData['tls_cert']:
            # Fall back to the machine's CA-signed certificate.
            info = __salt__['mc_ssl.ca_ssl_certs'](slapdData['cert_domain'])[0]
            slapdData['tls_cacert'] = info[0]
            slapdData['tls_cert'] = info[1]
            slapdData['tls_key'] = info[2]
        if slapdData['default_schema']:
            # Standard schema LDIFs shipped with the default install.
            for i in [
                '/etc/ldap/slapd.d/cn=config.ldif',
                '/etc/ldap/slapd.d/cn=config/cn=schema/cn={0}core.ldif',
                '/etc/ldap/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif',
                ('/etc/ldap/slapd.d/cn=config/'
                 'cn=schema/cn={2}inetorgperson.ldif'),
                '/etc/ldap/slapd.d/cn=config/cn=schema/cn={3}misc.ldif',
                '/etc/ldap/slapd.d/cn=config/cn=schema/cn={4}rfc2307bis.ldif',
                '/etc/ldap/slapd.d/cn=config/cn=schema/cn={8}samba.ldif',
                '/etc/ldap/slapd.d/cn=config/cn=schema/cn={11}ldapns.ldif',
                '/etc/ldap/slapd.d/cn=config/cn=schema/cn={19}mozilla.ldif',
                '/etc/ldap/slapd.d/cn=config/cn=schema/cn={15}sudo.ldif',
                ('/etc/ldap/slapd.d/cn=config/'
                 'cn=schema/cn={17}openssh-lpk.ldif'),
                '/etc/ldap/slapd.d/cn=config/cn=schema/cn={20}extension.ldif',
            ]:
                if i not in cn_config_files:
                    cn_config_files.append(i)
        if not slapdData['acls']:
            # Expand the ACL templates against the computed settings.
            acls = [a.format(data=slapdData)
                    for a in slapdData['acls_schema'][:]]
            slapdData['acls'] = acls
        if slapdData['fd_schema']:
            # FusionDirectory schema LDIFs.
            for i in [
                '/etc/ldap/slapd.d/cn=config/cn=schema/cn={5}service-fd.ldif',
                ('/etc/ldap/slapd.d/cn=config/'
                 'cn=schema/cn={6}systems-fd-conf.ldif'),
                '/etc/ldap/slapd.d/cn=config/cn=schema/cn={7}systems-fd.ldif',
                '/etc/ldap/slapd.d/cn=config/cn=schema/cn={9}core-fd.ldif',
                ('/etc/ldap/slapd.d/cn=config/cn=schema/'
                 'cn={10}core-fd-conf.ldif'),
                ('/etc/ldap/slapd.d/cn=config/cn=schema/'
                 'cn={12}recovery-fd.ldif'),
                '/etc/ldap/slapd.d/cn=config/cn=schema/cn={13}mail-fd.ldif',
                ('/etc/ldap/slapd.d/cn=config/cn=schema/'
                 'cn={14}mail-fd-conf.ldif'),
                ('/etc/ldap/slapd.d/cn=config/cn=schema/'
                 'cn={16}sudo-fd-conf.ldif'),
                '/etc/ldap/slapd.d/cn=config/cn=schema/cn={18}gpg-fd.ldif',
            ]:
                if i not in cn_config_files:
                    cn_config_files.append(i)
        __salt__['mc_macros.update_registry_params'](
            'slapd', local_conf, registry_format='pack')
        return slapdData
Exemple #19
0
def salt_pw(pw):
    """Hash *pw* with SHA1 plus an 8-byte random salt, SSHA-style.

    The salted digest is run through the module-level ``encode`` helper
    and prefixed with the ``{SSHA}`` scheme marker.
    """
    rand = secure_password(8)
    digest = hashlib.sha1(pw)
    digest.update(rand)
    return encode("{SSHA}" + encode(digest.digest() + rand))
Exemple #20
0
def generate_password(length=None):
    """Return a random password of *length* characters (16 when None)."""
    return secure_password(16 if length is None else length)
Exemple #21
0
def create(vm_):
    '''
    Create a single VM from a data dict
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__,
                __active_provider_name__ or 'openstack',
                vm_['profile'],
                vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    deploy = config.get_cloud_config_value('deploy', vm_, __opts__)
    key_filename = config.get_cloud_config_value('ssh_key_file',
                                                 vm_,
                                                 __opts__,
                                                 search_global=False,
                                                 default=None)
    if key_filename is not None:
        key_filename = os.path.expanduser(key_filename)
        if not os.path.isfile(key_filename):
            raise SaltCloudConfigError(
                'The defined ssh_key_file \'{0}\' does not exist'.format(
                    key_filename))

    vm_['key_filename'] = key_filename

    __utils__['cloud.fire_event']('event',
                                  'starting create',
                                  'salt/cloud/{0}/creating'.format(
                                      vm_['name']),
                                  args={
                                      'name': vm_['name'],
                                      'profile': vm_['profile'],
                                      'provider': vm_['driver'],
                                  },
                                  sock_dir=__opts__['sock_dir'],
                                  transport=__opts__['transport'])

    conn = get_conn()

    if 'instance_id' in vm_:
        # This was probably created via another process, and doesn't have
        # things like salt keys created yet, so let's create them now.
        if 'pub_key' not in vm_ and 'priv_key' not in vm_:
            log.debug('Generating minion keys for \'{0[name]}\''.format(vm_))
            vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value('keysize', vm_, __opts__))
        data = conn.ex_get_node_details(vm_['instance_id'])
        if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__[
                'change_password'] is True:
            vm_['password'] = sup.secure_password()
            conn.ex_set_password(data, vm_['password'])
        networks(vm_)
    else:
        # Put together all of the information required to request the instance,
        # and then fire off the request for it
        data, vm_ = request_instance(vm_)

        # Pull the instance ID, valid for both spot and normal instances
        vm_['instance_id'] = data.id

    def __query_node_data(vm_, data, floating):
        try:
            node = show_instance(vm_['name'], 'action')
            log.debug('Loaded node data for {0}:\n{1}'.format(
                vm_['name'], pprint.pformat(node)))
        except Exception as err:
            log.error(
                'Failed to get nodes list: {0}'.format(err),
                # Show the traceback if the debug logging level is enabled
                exc_info_on_loglevel=logging.DEBUG)
            # Trigger a failure in the wait for IP function
            return False

        running = node['state'] == NodeState.RUNNING
        if not running:
            # Still not running, trigger another iteration
            return

        if rackconnect(vm_) is True:
            check_libcloud_version((0, 14, 0), why='rackconnect: True')
            extra = node.get('extra')
            rc_status = extra.get('metadata',
                                  {}).get('rackconnect_automation_status', '')
            access_ip = extra.get('access_ip', '')

            if rc_status != 'DEPLOYED':
                log.debug('Waiting for Rackconnect automation to complete')
                return

        if managedcloud(vm_) is True:
            extra = node.get('extra')
            mc_status = extra.get('metadata',
                                  {}).get('rax_service_level_automation', '')

            if mc_status != 'Complete':
                log.debug('Waiting for managed cloud automation to complete')
                return

        public = node['public_ips']
        if floating:
            try:
                name = data.name
                ip = floating[0].ip_address
                conn.ex_attach_floating_ip_to_node(data, ip)
                log.info(
                    'Attaching floating IP \'{0}\' to node \'{1}\''.format(
                        ip, name))
                data.public_ips.append(ip)
                public = data.public_ips
            except Exception:
                # Note(pabelanger): Because we loop, we only want to attach the
                # floating IP address one. So, expect failures if the IP is
                # already attached.
                pass

        result = []
        private = node['private_ips']
        if private and not public:
            log.warning('Private IPs returned, but not public... Checking for '
                        'misidentified IPs')
            for private_ip in private:
                private_ip = preferred_ip(vm_, [private_ip])
                if salt.utils.cloud.is_public_ip(private_ip):
                    log.warning('{0} is a public IP'.format(private_ip))
                    data.public_ips.append(private_ip)
                    log.warning(
                        'Public IP address was not ready when we last checked.'
                        ' Appending public IP address now.')
                    public = data.public_ips
                else:
                    log.warning('{0} is a private IP'.format(private_ip))
                    ignore_ip = ignore_cidr(vm_, private_ip)
                    if private_ip not in data.private_ips and not ignore_ip:
                        result.append(private_ip)

        if rackconnect(vm_) is True and ssh_interface(vm_) != 'private_ips':
            data.public_ips = access_ip
            return data

        # populate return data with private_ips
        # when ssh_interface is set to private_ips and public_ips exist
        if not result and ssh_interface(vm_) == 'private_ips':
            for private_ip in private:
                ignore_ip = ignore_cidr(vm_, private_ip)
                if private_ip not in data.private_ips and not ignore_ip:
                    result.append(private_ip)

        if result:
            log.debug('result = {0}'.format(result))
            data.private_ips = result
            if ssh_interface(vm_) == 'private_ips':
                return data

        if public:
            data.public_ips = public
            if ssh_interface(vm_) != 'private_ips':
                return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_, data, vm_['floating']),
            timeout=config.get_cloud_config_value('wait_for_ip_timeout',
                                                  vm_,
                                                  __opts__,
                                                  default=10 * 60),
            interval=config.get_cloud_config_value('wait_for_ip_interval',
                                                   vm_,
                                                   __opts__,
                                                   default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc))

    log.debug('VM is now running')

    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    elif rackconnect(vm_) is True and ssh_interface(vm_) != 'private_ips':
        ip_address = data.public_ips
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address {0}'.format(ip_address))

    if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips':
        salt_ip_address = preferred_ip(vm_, data.private_ips)
        log.info('Salt interface set to: {0}'.format(salt_ip_address))
    else:
        salt_ip_address = preferred_ip(vm_, data.public_ips)
        log.debug('Salt interface set to: {0}'.format(salt_ip_address))

    if not ip_address:
        raise SaltCloudSystemExit('A valid IP address was not found')

    vm_['salt_host'] = salt_ip_address
    vm_['ssh_host'] = ip_address
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data.__dict__)

    if hasattr(data, 'extra') and 'password' in data.extra:
        del data.extra['password']

    log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
    log.debug('\'{0[name]}\' VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data.__dict__)))

    __utils__['cloud.fire_event']('event',
                                  'created instance',
                                  'salt/cloud/{0}/created'.format(vm_['name']),
                                  args={
                                      'name': vm_['name'],
                                      'profile': vm_['profile'],
                                      'provider': vm_['driver'],
                                  },
                                  sock_dir=__opts__['sock_dir'],
                                  transport=__opts__['transport'])

    return ret
Exemple #22
0
def create(vm_):
    '''
    Create a single VM from a data dict.

    vm_
        The cloud profile/VM definition dict. Must contain at least
        ``name`` and ``profile``; when ``instance_id`` is pre-set the
        existing instance is reused instead of requesting a new one.

    Returns the bootstrap result dict (merged with the node's attribute
    dict) on success, or ``False`` when the profile is mis-configured or
    node data cannot be fetched while waiting for an IP.

    Raises ``SaltCloudConfigError`` when the configured ``ssh_key_file``
    does not exist, and ``SaltCloudSystemExit`` when waiting for an IP
    fails/times out or no usable IP address is found.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if config.is_profile_configured(__opts__,
                                        __active_provider_name__ or 'openstack',
                                        vm_['profile']) is False:
            return False
    except AttributeError:
        pass

    # Since using "provider: <provider-engine>" is deprecated, alias provider
    # to use driver: "driver: <provider-engine>"
    if 'provider' in vm_:
        vm_['driver'] = vm_.pop('provider')

    # NOTE(review): `deploy` is computed but never read below in this
    # function - presumably vestigial; confirm before removing.
    deploy = config.get_cloud_config_value('deploy', vm_, __opts__)
    key_filename = config.get_cloud_config_value(
        'ssh_key_file', vm_, __opts__, search_global=False, default=None
    )
    if key_filename is not None:
        key_filename = os.path.expanduser(key_filename)
        if not os.path.isfile(key_filename):
            raise SaltCloudConfigError(
                'The defined ssh_key_file {0!r} does not exist'.format(
                    key_filename
                )
            )

    vm_['key_filename'] = key_filename

    # Announce the creation on the salt event bus before any API call is made.
    salt.utils.cloud.fire_event(
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        transport=__opts__['transport']
    )

    conn = get_conn()

    if 'instance_id' in vm_:
        # This was probably created via another process, and doesn't have
        # things like salt keys created yet, so let's create them now.
        if 'pub_key' not in vm_ and 'priv_key' not in vm_:
            log.debug('Generating minion keys for {0[name]!r}'.format(vm_))
            vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value(
                    'keysize',
                    vm_,
                    __opts__
                )
            )
        data = conn.ex_get_node_details(vm_['instance_id'])
        # Reset the root password only when no SSH key is configured and the
        # operator explicitly opted in via the `change_password` option.
        if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True:
            vm_['password'] = sup.secure_password()
            conn.ex_set_password(data, vm_['password'])
        networks(vm_)
    else:
        # Put together all of the information required to request the instance,
        # and then fire off the request for it
        data, vm_ = request_instance(vm_)

        # Pull the instance ID, valid for both spot and normal instances
        vm_['instance_id'] = data.id

    def __query_node_data(vm_, data, floating):
        '''
        Poll callback handed to salt.utils.cloud.wait_for_ip.

        Return values follow the wait_for_ip contract:
        - ``False``: hard failure, abort waiting.
        - ``None`` (implicit return): not ready yet, poll again.
        - ``data``: the node object, once a usable IP address is available.
        '''
        try:
            node = show_instance(vm_['name'], 'action')
            log.debug(
                'Loaded node data for {0}:\n{1}'.format(
                    vm_['name'],
                    pprint.pformat(node)
                )
            )
        except Exception as err:
            log.error(
                'Failed to get nodes list: {0}'.format(
                    err
                ),
                # Show the traceback if the debug logging level is enabled
                exc_info_on_loglevel=logging.DEBUG
            )
            # Trigger a failure in the wait for IP function
            return False

        running = node['state'] == NodeState.RUNNING
        if not running:
            # Still not running, trigger another iteration
            return

        if rackconnect(vm_) is True:
            check_libcloud_version((0, 14, 0), why='rackconnect: True')
            extra = node.get('extra')
            rc_status = extra.get('metadata', {}).get(
                'rackconnect_automation_status', '')
            # `access_ip` is only bound in this branch; the read further down
            # is also guarded by `rackconnect(vm_) is True`, so it is always
            # defined when used.
            access_ip = extra.get('access_ip', '')

            if rc_status != 'DEPLOYED':
                log.debug('Waiting for Rackconnect automation to complete')
                return

        if managedcloud(vm_) is True:
            extra = node.get('extra')
            mc_status = extra.get('metadata', {}).get(
                'rax_service_level_automation', '')

            if mc_status != 'Complete':
                log.debug('Waiting for managed cloud automation to complete')
                return

        public = node['public_ips']
        if floating:
            try:
                name = data.name
                ip = floating[0].ip_address
                conn.ex_attach_floating_ip_to_node(data, ip)
                log.info(
                    'Attaching floating IP {0!r} to node {1!r}'.format(
                        ip, name
                    )
                )
                data.public_ips.append(ip)
                public = data.public_ips
            except Exception:
                # Note(pabelanger): Because we loop, we only want to attach the
                # floating IP address one. So, expect failures if the IP is
                # already attached.
                pass

        result = []
        private = node['private_ips']
        if private and not public:
            log.warn(
                'Private IPs returned, but not public... Checking for '
                'misidentified IPs'
            )
            for private_ip in private:
                private_ip = preferred_ip(vm_, [private_ip])
                if salt.utils.cloud.is_public_ip(private_ip):
                    # The provider reported this address as private, but it
                    # is actually routable: promote it to a public address.
                    log.warn('{0} is a public IP'.format(private_ip))
                    data.public_ips.append(private_ip)
                    log.warn(
                        'Public IP address was not ready when we last checked.'
                        ' Appending public IP address now.'
                    )
                    public = data.public_ips
                else:
                    log.warn('{0} is a private IP'.format(private_ip))
                    ignore_ip = ignore_cidr(vm_, private_ip)
                    if private_ip not in data.private_ips and not ignore_ip:
                        result.append(private_ip)

        if rackconnect(vm_) is True:
            if ssh_interface(vm_) != 'private_ips':
                data.public_ips = access_ip
                return data

        # populate return data with private_ips
        # when ssh_interface is set to private_ips and public_ips exist
        if not result and ssh_interface(vm_) == 'private_ips':
            for private_ip in private:
                ignore_ip = ignore_cidr(vm_, private_ip)
                if private_ip not in data.private_ips and not ignore_ip:
                    result.append(private_ip)

        if result:
            log.debug('result = {0}'.format(result))
            data.private_ips = result
            if ssh_interface(vm_) == 'private_ips':
                return data

        if public:
            data.public_ips = public
            if ssh_interface(vm_) != 'private_ips':
                return data

    try:
        # Poll __query_node_data until it returns the node, or time out.
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_, data, vm_['floating']),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc))

    log.debug('VM is now running')

    # Address used for the SSH bootstrap connection.
    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    elif rackconnect(vm_) is True and ssh_interface(vm_) != 'private_ips':
        ip_address = data.public_ips
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address {0}'.format(ip_address))

    # Address used for salt minion<->master traffic; may differ from the
    # SSH address when `salt_interface` is configured.
    if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips':
        salt_ip_address = preferred_ip(vm_, data.private_ips)
        log.info('Salt interface set to: {0}'.format(salt_ip_address))
    else:
        salt_ip_address = preferred_ip(vm_, data.public_ips)
        log.debug('Salt interface set to: {0}'.format(salt_ip_address))

    if not ip_address:
        raise SaltCloudSystemExit('A valid IP address was not found')

    vm_['salt_host'] = salt_ip_address
    vm_['ssh_host'] = ip_address
    ret = salt.utils.cloud.bootstrap(vm_, __opts__)
    ret.update(data.__dict__)

    # Do not leak the admin password into logs or the returned data.
    if hasattr(data, 'extra') and 'password' in data.extra:
        del data.extra['password']

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug(
        '{0[name]!r} VM creation details:\n{1}'.format(
            vm_, pprint.pformat(data.__dict__)
        )
    )

    salt.utils.cloud.fire_event(
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        transport=__opts__['transport']
    )

    return ret
Exemple #23
0
def create(vm_):
    '''
    Create a single VM from a data dict.

    vm_
        The cloud profile/VM definition dict. Must contain at least
        ``name`` and ``profile``; when ``instance_id`` is pre-set the
        existing instance is reused instead of requesting a new one.

    Returns the bootstrap result dict (merged with the node's attribute
    dict) on success, or ``False`` when the profile is mis-configured.

    Raises ``SaltCloudConfigError`` when the configured ``ssh_key_file``
    does not exist, and ``SaltCloudSystemExit`` when waiting for an IP
    fails/times out or no usable IP address is found.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__,
                __active_provider_name__ or 'nova',
                vm_['profile'],
                vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    # NOTE(review): `deploy` is computed but never read below in this
    # function - presumably vestigial; confirm before removing.
    deploy = config.get_cloud_config_value('deploy', vm_, __opts__)
    key_filename = config.get_cloud_config_value('ssh_key_file',
                                                 vm_,
                                                 __opts__,
                                                 search_global=False,
                                                 default=None)
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_key_file \'{0}\' does not exist'.format(
                key_filename))

    vm_['key_filename'] = key_filename

    # Announce the creation on the salt event bus before any API call is made.
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])
    conn = get_conn()

    if 'instance_id' in vm_:
        # This was probably created via another process, and doesn't have
        # things like salt keys created yet, so let's create them now.
        if 'pub_key' not in vm_ and 'priv_key' not in vm_:
            log.debug('Generating minion keys for \'{0[name]}\''.format(vm_))
            vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value('keysize', vm_, __opts__))
        data = conn.server_show_libcloud(vm_['instance_id'])
        # Reset the root password only when no SSH key is configured and the
        # operator explicitly opted in via the `change_password` option.
        if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__[
                'change_password'] is True:
            vm_['password'] = sup.secure_password()
            conn.root_password(vm_['instance_id'], vm_['password'])
    else:
        # Put together all of the information required to request the instance,
        # and then fire off the request for it
        data, vm_ = request_instance(vm_)

        # Pull the instance ID, valid for both spot and normal instances
        vm_['instance_id'] = data.id

    try:
        # `_query_node_data` is defined elsewhere in this module; it is
        # polled until it returns the node object or the timeout expires.
        data = salt.utils.cloud.wait_for_ip(
            _query_node_data,
            update_args=(vm_, data, conn),
            timeout=config.get_cloud_config_value('wait_for_ip_timeout',
                                                  vm_,
                                                  __opts__,
                                                  default=10 * 60),
            interval=config.get_cloud_config_value('wait_for_ip_interval',
                                                   vm_,
                                                   __opts__,
                                                   default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc))

    log.debug('VM is now running')

    # Address used for the SSH bootstrap connection.
    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    elif ssh_interface(vm_) == 'fixed_ips':
        ip_address = preferred_ip(vm_, data.fixed_ips)
    elif ssh_interface(vm_) == 'floating_ips':
        ip_address = preferred_ip(vm_, data.floating_ips)
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address {0}'.format(ip_address))

    # Address used for salt minion<->master traffic; may differ from the
    # SSH address when `salt_interface` is configured.
    if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips':
        salt_ip_address = preferred_ip(vm_, data.private_ips)
        log.info('Salt interface set to: {0}'.format(salt_ip_address))
    elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips':
        salt_ip_address = preferred_ip(vm_, data.fixed_ips)
        log.info('Salt interface set to: {0}'.format(salt_ip_address))
    elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips':
        salt_ip_address = preferred_ip(vm_, data.floating_ips)
        log.info('Salt interface set to: {0}'.format(salt_ip_address))
    else:
        salt_ip_address = preferred_ip(vm_, data.public_ips)
        log.debug('Salt interface set to: {0}'.format(salt_ip_address))

    if not ip_address:
        raise SaltCloudSystemExit('A valid IP address was not found')

    vm_['ssh_host'] = ip_address
    vm_['salt_host'] = salt_ip_address

    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data.__dict__)

    # Do not leak the admin password into the returned data.
    # NOTE(review): this indexes ret['extra'] without first checking that
    # 'extra' exists - confirm data.__dict__ always carries 'extra' here.
    if 'password' in ret['extra']:
        del ret['extra']['password']

    log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
    log.debug('\'{0[name]}\' VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data.__dict__)))

    event_data = {
        'name': vm_['name'],
        'profile': vm_['profile'],
        'provider': vm_['driver'],
        'instance_id': vm_['instance_id'],
        'floating_ips': data.floating_ips,
        'fixed_ips': data.fixed_ips,
        'private_ips': data.private_ips,
        'public_ips': data.public_ips
    }

    __utils__['cloud.fire_event']('event',
                                  'created instance',
                                  'salt/cloud/{0}/created'.format(vm_['name']),
                                  args=__utils__['cloud.filter_event'](
                                      'created', event_data, list(event_data)),
                                  sock_dir=__opts__['sock_dir'],
                                  transport=__opts__['transport'])
    # Record this VM in the cachedir index so later actions can find it.
    __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova',
                                          vm_['driver'])
    return ret
Exemple #24
0
    def _settings():
        '''
        Compute the nginx service settings registry.

        Reads grains and the packed local registry, generates (and now
        persists) the naxsi UI password, and merges the computed values
        with pillar data via ``mc_utils.defaults`` under the
        ``makina-states.services.http.nginx`` prefix.

        Returns the merged settings dict.
        '''
        grains = __grains__
        pillar = __pillar__
        local_conf = __salt__['mc_macros.get_local_registry'](
            'nginx', registry_format='pack')
        # setdefault (not .get) so the generated password is stored into
        # local_conf and persisted by update_local_registry below, instead
        # of being silently regenerated on every call.  This matches the
        # pattern used by the other *_settings registries in this project.
        naxsi_ui_pass = local_conf.setdefault('naxsi_pass',
                                              secure_password(32))
        locations = __salt__['mc_locations.settings']()
        # NOTE(review): the fallback is the string '4', not the int 4 -
        # presumably consumed as a string in templates; confirm.
        nbcpus = __grains__.get('num_cpus', '4')
        # epoll is only worthwhile on Linux kernels.
        epoll = False
        if 'linux' in __grains__.get('kernel', '').lower():
            epoll = True
        ulimit = "65536"
        is_rp = is_reverse_proxied()
        reverse_proxy_addresses = []
        if is_rp:
            # Trust the default gateway as the upstream reverse proxy.
            gw = grains.get('makina.default_route', {}).get('gateway', '').strip()
            if gw and gw not in reverse_proxy_addresses:
                reverse_proxy_addresses.append(gw)

        # Access-log format roughly equivalent to the stock "combined"
        # format, plus the gzip compression ratio.
        logformat = '$remote_addr - $remote_user [$time_local]  '
        logformat += '"$request" $status $bytes_sent "$http_referer" '
        logformat += '"$http_user_agent" "$gzip_ratio"'
        logformats = {
            'custom_combined': logformat
        }

        www_reg = __salt__['mc_www.settings']()
        nginxData = __salt__['mc_utils.defaults'](
            'makina-states.services.http.nginx', {
                'rotate': '365',
                'is_reverse_proxied': is_rp,
                'reverse_proxy_addresses': reverse_proxy_addresses,
                'default_vhost': True,
                'use_real_ip': True,
                'use_naxsi': False,
                'use_naxsi_secrules': True,
                # NOTE(review): value looks like a scraping/redaction
                # artifact - confirm the intended default user name.
                'naxsi_ui_user': '******',
                'naxsi_ui_pass': naxsi_ui_pass,
                # NOTE(review): looks like a typo for '127.0.0.1' - left
                # unchanged since consumers may rely on the literal value.
                'naxsi_ui_host': '127.0.01',
                'naxsi_ui_intercept_port': '18080',
                'naxsi_ui_extract_port': '18081',
                'use_naxsi_learning': True,
                'naxsi_denied_url': "/RequestDenied",
                'real_ip_header': 'X-Forwarded-For',
                'logformat': 'custom_combined',
                'logformats': logformats,
                'v6': False,
                'allowed_hosts': [],
                'ulimit': ulimit,
                'client_max_body_size': www_reg[
                    'upload_max_filesize'],
                'open_file_cache': 'max=200000 inactive=5m',
                'open_file_cache_valid': '6m',
                'open_file_cache_min_uses': '2',
                'open_file_cache_errors': 'on',
                'epoll': epoll,
                'default_type': 'application/octet-stream',
                'worker_processes': nbcpus,
                'worker_connections': '1024',
                'multi_accept': True,
                'user': '******',
                'server_names_hash_bucket_size': '64',
                'loglevel': 'crit',
                # A duplicate hard-coded 'logdir': '/var/log/nginx' entry was
                # removed here: the locations-based 'logdir' below is the one
                # that took effect in the original dict literal.
                'access_log': '{logdir}/access.log',
                'sendfile': True,
                'tcp_nodelay': True,
                'tcp_nopush': True,
                'reset_timedout_connection': 'on',
                'client_body_timeout': '10',
                'send_timeout': '2',
                'keepalive_requests': '100000',
                'keepalive_timeout': '30',
                'types_hash_max_size': '2048',
                'server_tokens': False,
                'server_name_in_redirect': False,
                'error_log':  '{logdir}/error.log',
                'virtualhosts': {},
                'gzip': True,
                'redirect_aliases': True,
                'port': '80',
                'default_domains': ['localhost'],
                # NOTE(review): key spelled 'sshl_port' (not 'ssl_port') -
                # kept as-is because consumers read it by this name.
                'sshl_port': '443',
                'default_activation': True,
                'package': 'nginx',
                'docdir': '/usr/share/doc/nginx',
                'doc_root': www_reg['doc_root'],
                'service': 'nginx',
                'basedir': locations['conf_dir'] + '/nginx',
                'confdir': locations['conf_dir'] + '/nginx/conf.d',
                'logdir': locations['var_log_dir'] + '/nginx',
                'wwwdir': locations['srv_dir'] + '/www',
                'vhost_default_template': (
                    'salt://makina-states/files/'
                    'etc/nginx/sites-available/vhost.conf'),
                'vhost_wrapper_template': (
                    'salt://makina-states/files/'
                    'etc/nginx/sites-available/vhost.conf'),
                'vhost_default_content': (
                    'salt://makina-states/files/'
                    'etc/nginx/sites-available/default.conf'),
                'vhost_top_template': (
                    'salt://makina-states/files/'
                    'etc/nginx/sites-available/vhost.top.conf'),
                'vhost_content_template': (
                    'salt://makina-states/files/'
                    'etc/nginx/sites-available/vhost.content.conf'),
            }
        )
        # Persist local_conf (now including 'naxsi_pass') so the password
        # is stable across runs.
        __salt__['mc_macros.update_local_registry'](
            'nginx', local_conf, registry_format='pack')
        return nginxData