def created(name, peers=None, **kwargs):
    '''
    Check if volume already exists

    name
        name of the volume

    .. code-block:: yaml

        gluster-cluster:
          glusterfs.created:
            - name: mycluster
            - brick: /srv/gluster/drive1
            - replica: True
            - count: 2
            - short: True
            - start: True
            - peers:
              - one
              - two
              - three
              - four
    '''
    ret = {'name': name, 'changes': {}, 'comment': '', 'result': False}
    # Bug fix: the default peers=None was iterated below, raising
    # TypeError whenever the state was used without a peers list.
    peers = peers or []

    volumes = __salt__['glusterfs.list_volumes']()
    if name in volumes:
        ret['result'] = True
        ret['comment'] = 'Volume {0} already exists.'.format(name)
        return ret
    elif __opts__['test']:
        ret['comment'] = 'Volume {0} will be created'.format(name)
        ret['result'] = None
        return ret

    # suc.check_name returns truthy when the name contains characters
    # outside the allowed set.
    if suc.check_name(name, 'a-zA-Z0-9._-'):
        ret['comment'] = 'Invalid characters in volume name.'
        ret['result'] = False
        return ret

    if any([suc.check_name(peer, 'a-zA-Z0-9._-') for peer in peers]):
        ret['comment'] = 'Invalid characters in a peer name.'
        ret['result'] = False
        return ret

    ret['comment'] = __salt__['glusterfs.create'](name, peers, **kwargs)

    # Re-list volumes to confirm the creation actually took effect.
    if name in __salt__['glusterfs.list_volumes']():
        ret['changes'] = {'new': name, 'old': ''}
        ret['result'] = True

    return ret
def peer(name):
    '''
    Probe a remote node so it joins this host's peer group.  A probe is
    only attempted when the target resolves via DNS or appears in the
    local hosts file.

    name
        The remote host with which to peer.

    CLI Example:

    .. code-block:: bash

        salt 'one.gluster.*' glusterfs.peer two
    '''
    if suc.check_name(name, 'a-zA-Z0-9._-'):
        return 'Invalid characters in peer name'

    # Flatten every hostname known to the local hosts file.
    known_hosts = [entry
                   for entries in __salt__['hosts.list_hosts']().values()
                   for entry in entries]

    resolvable = __salt__['dig.A'](name)
    if resolvable or name in known_hosts:
        return __salt__['cmd.run']('gluster peer probe {0}'.format(name))
    return 'Node does not resolve to an ip address'
def peer(name):
    '''
    Add another node into the peer list.

    name
        The remote host to probe.

    CLI Example:

    .. code-block:: bash

        salt 'one.gluster.*' glusterfs.peer two

    Under the hood this runs ``gluster peer probe <name>``.  Gluster CLI
    3.4.4 responds with, for example:

    - ``peer probe: success: on localhost not needed`` (target is the
      local host)
    - ``peer probe: success`` (peer newly added)
    - ``peer probe: success: host ftp2 port 24007 already in peer list``
      (peer already present)
    '''
    # Bail out before shelling out if the name has disallowed characters.
    if suc.check_name(name, 'a-zA-Z0-9._-'):
        return 'Invalid characters in peer name'
    return __salt__['cmd.run']('gluster peer probe {0}'.format(name))
def peered(name):
    '''
    Check if node is peered.

    name
        The remote host with which to peer.

    .. code-block:: yaml

        peer-cluster:
          glusterfs.peered:
            - name: two

        peer-clusters:
          glusterfs.peered:
            - names:
              - one
              - two
              - three
              - four
    '''
    ret = {'name': name, 'changes': {}, 'comment': '', 'result': False}

    peers = __salt__['glusterfs.list_peers']()
    # list_peers may return None/empty; only short-circuit when the host
    # is genuinely present.
    if peers and name in peers:
        ret['result'] = True
        ret['comment'] = 'Host {0} already peered'.format(name)
        return ret

    # Bug fix: report the pending change in test mode regardless of
    # whether any peers already exist (previously tangled with the
    # 'if peers' nesting).
    if __opts__['test']:
        ret['comment'] = 'Peer {0} will be added.'.format(name)
        ret['result'] = None
        return ret

    if suc.check_name(name, 'a-zA-Z0-9._-'):
        ret['comment'] = 'Invalid characters in peer name.'
        ret['result'] = False
        return ret

    ret['comment'] = __salt__['glusterfs.peer'](name)

    newpeers = __salt__['glusterfs.list_peers']()
    # Bug fix: guard against newpeers being None before 'in' membership.
    if newpeers and name in newpeers:
        ret['result'] = True
        ret['changes'] = {'new': newpeers, 'old': peers}
    elif name == socket.gethostname().split('.')[0]:
        # Probing the local host is a no-op; treat it as success.
        ret['result'] = True
        return ret
    elif 'on localhost not needed' in ret['comment']:
        ret['result'] = True
        ret['comment'] = 'Peering with localhost is not needed'
    else:
        ret['result'] = False
    return ret
def _check_name(name):
    """Validate *name* against the allowed character set.

    Returns a state-style dict whose ``result`` is False (with an
    explanatory ``comment``) when *name* contains characters outside
    ``a-zA-Z0-9._-``, and True otherwise.
    """
    ret = {"name": name, "changes": {}, "result": None, "comment": ""}
    if not suc.check_name(name, "a-zA-Z0-9._-"):
        ret["result"] = True
    else:
        ret["comment"] = "Invalid characters in name."
        ret["result"] = False
    return ret
def _check_name(name):
    '''
    Build a state return dict recording whether *name* is valid.

    ``result`` is False with an explanatory comment when *name* has
    characters outside ``a-zA-Z0-9._-``; otherwise True.
    '''
    invalid = suc.check_name(name, 'a-zA-Z0-9._-')
    return {
        'name': name,
        'changes': {},
        'result': False if invalid else True,
        'comment': 'Invalid characters in name.' if invalid else '',
    }
def started(name, **kwargs):
    '''
    Check if volume has been started

    name
        name of the volume

    .. code-block:: yaml

        gluster-started:
          glusterfs.started:
            - name: mycluster
    '''
    ret = {'name': name, 'changes': {}, 'comment': '', 'result': False}
    volumes = __salt__['glusterfs.list_volumes']()
    # Fail early if the volume does not exist at all.
    if name not in volumes:
        ret['result'] = False
        ret['comment'] = 'Volume {0} does not exist'.format(name)
        return ret

    if suc.check_name(name, 'a-zA-Z0-9._-'):
        ret['comment'] = 'Invalid characters in volume name.'
        ret['result'] = False
        return ret

    status = __salt__['glusterfs.status'](name)
    if status != 'Volume {0} is not started'.format(name):
        # Already running; nothing to change.
        ret['comment'] = status
        ret['result'] = True
        return ret
    elif __opts__['test']:
        # Bug fix: the message previously claimed the volume would be
        # *created*; this state only starts existing volumes.
        ret['comment'] = 'Volume {0} will be started'.format(name)
        ret['result'] = None
        return ret

    ret['comment'] = __salt__['glusterfs.start'](name)
    ret['result'] = True

    # Re-check: if the volume still reports not-started, the start call
    # failed even though it returned.
    status = __salt__['glusterfs.status'](name)
    if status == 'Volume {0} is not started'.format(name):
        ret['comment'] = status
        ret['result'] = False
        return ret

    # Bug fix: the key was misspelled 'change', so successful starts
    # never reported any changes.
    ret['changes'] = {'new': 'started', 'old': ''}
    return ret
def peer(name):
    '''
    Ask gluster to probe ``name`` and add it to the peer list.

    name
        The remote host to probe.

    CLI Example:

    .. code-block:: bash

        salt 'one.gluster.*' glusterfs.peer two
    '''
    # Reject names containing anything outside the allowed character set.
    invalid = suc.check_name(name, 'a-zA-Z0-9._-')
    if invalid:
        return 'Invalid characters in peer name'

    probe_cmd = 'gluster peer probe {0}'.format(name)
    return __salt__['cmd.run'](probe_cmd)
def peer(name):
    '''
    Add another node into the peer list.

    name
        The remote host to probe.

    Raises ``SaltInvocationError`` when ``name`` contains characters
    outside ``a-zA-Z0-9._-``.

    CLI Example:

    .. code-block:: bash

        salt 'one.gluster.*' glusterfs.peer two

    GLUSTER direct CLI example (to show what salt is sending to gluster):

        $ gluster peer probe ftp2

    GLUSTER CLI 3.4.4 return example (so we know what we are parsing):

        #if the "peer" is the local host:
        peer probe: success: on localhost not needed

        #if the peer was just added:
        peer probe: success

        #if the peer was already part of the cluster:
        peer probe: success: host ftp2 port 24007 already in peer list
    '''
    if suc.check_name(name, 'a-zA-Z0-9._-'):
        raise SaltInvocationError(
            'Invalid characters in peer name "{0}"'.format(name))

    cmd = 'peer probe {0}'.format(name)
    # Bug fix: run the probe exactly once.  The old code called
    # _gluster_xml() once per field, executing the gluster command twice.
    root = _gluster_xml(cmd)
    op_result = {
        "exitval": root.find('opErrno').text,
        "output": root.find('output').text
    }

    return op_result
def peered(name):
    '''
    Check if node is peered.

    name
        The remote host with which to peer.

    .. code-block:: yaml

        peer-cluster:
          glusterfs.peered:
            - name: two

        peer-clusters:
          glusterfs.peered:
            - names:
              - one
              - two
              - three
              - four
    '''
    ret = {'name': name, 'changes': {}, 'comment': '', 'result': False}

    peers = __salt__['glusterfs.list_peers']()
    # Only short-circuit when the host is genuinely present.
    if peers and name in peers:
        ret['result'] = True
        ret['comment'] = 'Host {0} already peered'.format(name)
        return ret

    # Report the pending change in test mode before touching the cluster.
    if __opts__['test']:
        ret['comment'] = 'Peer {0} will be added.'.format(name)
        ret['result'] = None
        return ret

    if suc.check_name(name, 'a-zA-Z0-9._-'):
        ret['comment'] = 'Invalid characters in peer name.'
        ret['result'] = False
        return ret

    # Bug fix: probe once and reuse the result.  The old code invoked
    # glusterfs.peer twice (once for the membership test, once for the
    # value), running the probe command twice.  Also guard against a
    # non-dict return, which the old subscript would have crashed on.
    probe = __salt__['glusterfs.peer'](name)
    ret['comment'] = probe.get('output', '') if isinstance(probe, dict) else ''

    newpeers = __salt__['glusterfs.list_peers']()
    # if newpeers was null, we know something didn't work.
    if newpeers and name in newpeers:
        ret['result'] = True
        ret['changes'] = {'new': newpeers, 'old': peers}
    # In case the hostname doesn't have any periods in it
    elif name == socket.gethostname():
        ret['result'] = True
        return ret
    # In case they have a hostname like "example.com"
    elif name == socket.gethostname().split('.')[0]:
        ret['result'] = True
        return ret
    elif 'on localhost not needed' in ret['comment']:
        ret['result'] = True
        ret['comment'] = 'Peering with localhost is not needed'
    else:
        ret['result'] = False
    return ret
def peered(name):
    '''
    Check if node is peered.

    name
        The remote host with which to peer.

    .. code-block:: yaml

        peer-cluster:
          glusterfs.peered:
            - name: two

        peer-clusters:
          glusterfs.peered:
            - names:
              - one
              - two
              - three
              - four
    '''
    ret = {'name': name, 'changes': {}, 'comment': '', 'result': False}

    # NOTE(review): this assumes suc.check_name *raises* SaltCloudException
    # on invalid input -- confirm, since other code in this file treats its
    # truthy return value as the failure signal instead.
    try:
        suc.check_name(name, 'a-zA-Z0-9._-')
    except SaltCloudException:
        ret['comment'] = 'Invalid characters in peer name.'
        return ret

    # Check if the name resolves to one of this minion IP addresses
    name_ips = salt.utils.network.host_to_ips(name)
    if name_ips is not None:
        # if it is None, it means resolution fails; let's not hide
        # it from the user.
        this_ips = set(salt.utils.network.ip_addrs())
        this_ips.update(salt.utils.network.ip_addrs6())
        if this_ips.intersection(name_ips):
            # The name is one of our own addresses: peering with
            # ourselves is a no-op, report success without probing.
            ret['result'] = True
            ret['comment'] = 'Peering with localhost is not needed'
            return ret

    peers = __salt__['glusterfs.peer_status']()

    # peer_status maps peer ids to info dicts carrying a 'hostnames' list.
    if peers and any(name in v['hostnames'] for v in peers.values()):
        ret['result'] = True
        ret['comment'] = 'Host {0} already peered'.format(name)
        return ret

    # In test mode, report the pending change without probing.
    if __opts__['test']:
        ret['comment'] = 'Peer {0} will be added.'.format(name)
        ret['result'] = None
        return ret

    if not __salt__['glusterfs.peer'](name):
        ret['comment'] = 'Failed to peer with {0}, please check logs for errors'.format(
            name)
        return ret

    # Double check that the action succeeded
    newpeers = __salt__['glusterfs.peer_status']()
    if newpeers and any(name in v['hostnames'] for v in newpeers.values()):
        ret['result'] = True
        ret['comment'] = 'Host {0} successfully peered'.format(name)
        ret['changes'] = {'new': newpeers, 'old': peers}
    else:
        # Probe reported success but the peer never showed up; leave
        # result False so the state is flagged for investigation.
        ret['comment'] = 'Host {0} was successfully peered but did not appear in the list of peers'.format(
            name)
    return ret
def peered(name):
    '''
    Check if node is peered.

    name
        The remote host with which to peer.

    .. code-block:: yaml

        peer-cluster:
          glusterfs.peered:
            - name: two

        peer-clusters:
          glusterfs.peered:
            - names:
              - one
              - two
              - three
              - four
    '''
    ret = {'name': name, 'changes': {}, 'comment': '', 'result': False}

    # NOTE(review): assumes suc.check_name raises SaltCloudException on
    # invalid input -- other variants treat its return value as the signal.
    try:
        suc.check_name(name, 'a-zA-Z0-9._-')
    except SaltCloudException:
        ret['comment'] = 'Invalid characters in peer name.'
        return ret

    # Check if the name resolves to localhost
    try:
        if socket.gethostbyname(name) in __salt__['network.ip_addrs']():
            ret['result'] = True
            ret['comment'] = 'Peering with localhost is not needed'
            return ret
    except socket.gaierror:
        # Bug fix: an unresolvable name used to raise out of the state;
        # fall through and let the probe below surface the real error.
        pass

    peers = __salt__['glusterfs.peer_status']()

    if peers and any(name in v['hostnames'] for v in peers.values()):
        ret['result'] = True
        ret['comment'] = 'Host {0} already peered'.format(name)
        return ret

    if __opts__['test']:
        ret['comment'] = 'Peer {0} will be added.'.format(name)
        ret['result'] = None
        return ret

    peered = __salt__['glusterfs.peer'](name)
    if not peered:
        ret['comment'] = 'Failed to peer with {0}, please check logs for errors'.format(name)
        return ret

    # Double check that the action succeeded
    newpeers = __salt__['glusterfs.peer_status']()
    if newpeers and any(name in v['hostnames'] for v in newpeers.values()):
        ret['result'] = True
        ret['comment'] = 'Host {0} successfully peered'.format(name)
        ret['changes'] = {'new': newpeers, 'old': peers}
    else:
        ret['comment'] = 'Host {0} was successfully peered but did not appear in the list of peers'.format(name)
    return ret
def created(name, bricks, stripe=False, replica=False, device_vg=False,
            transport='tcp', start=False):
    '''
    Check if volume already exists

    name
        name of the volume

    .. code-block:: yaml

        myvolume:
          glusterfs.created:
            - bricks:
                - host1:/srv/gluster/drive1
                - host2:/srv/gluster/drive2

        Replicated Volume:
          glusterfs.created:
            - name: volume2
            - bricks:
              - host1:/srv/gluster/drive2
              - host2:/srv/gluster/drive3
            - replica: 2
            - start: True
    '''
    ret = {'name': name, 'changes': {}, 'comment': '', 'result': False}
    volumes = __salt__['glusterfs.list_volumes']()
    if name in volumes:
        if start:
            # glusterfs.status returns a dict only for a started volume.
            if isinstance(__salt__['glusterfs.status'](name), dict):
                ret['result'] = True
                cmnt = 'Volume {0} already exists and is started.'.format(name)
            else:
                result = __salt__['glusterfs.start_volume'](name)
                if 'started' in result:
                    ret['result'] = True
                    cmnt = 'Volume {0} started.'.format(name)
                    ret['changes'] = {'new': 'started', 'old': 'stopped'}
                else:
                    ret['result'] = False
                    cmnt = result
        else:
            ret['result'] = True
            cmnt = 'Volume {0} already exists.'.format(name)
        ret['comment'] = cmnt
        return ret
    elif __opts__['test']:
        # Bug fix: on this branch the volume does not exist, so the old
        # isinstance(status, dict) check could never be true and the
        # "and started" message was unreachable even with start=True.
        if start:
            comment = 'Volume {0} will be created and started'.format(name)
        else:
            comment = 'Volume {0} will be created'.format(name)
        ret['comment'] = comment
        ret['result'] = None
        return ret

    if suc.check_name(name, 'a-zA-Z0-9._-'):
        ret['comment'] = 'Invalid characters in volume name.'
        ret['result'] = False
        return ret

    ret['comment'] = __salt__['glusterfs.create'](name, bricks, stripe,
                                                  replica, device_vg,
                                                  transport, start)

    old_volumes = volumes
    volumes = __salt__['glusterfs.list_volumes']()
    if name in volumes:
        ret['changes'] = {'new': volumes, 'old': old_volumes}
        ret['result'] = True

    return ret
def volume_present(name, bricks, stripe=False, replica=False, device_vg=False,
                   transport='tcp', start=False, force=False):
    '''
    Ensure that the volume exists

    name
        name of the volume

    bricks
        list of brick paths

    start
        ensure that the volume is also started

    .. code-block:: yaml

        myvolume:
          glusterfs.volume_present:
            - bricks:
                - host1:/srv/gluster/drive1
                - host2:/srv/gluster/drive2

        Replicated Volume:
          glusterfs.volume_present:
            - name: volume2
            - bricks:
              - host1:/srv/gluster/drive2
              - host2:/srv/gluster/drive3
            - replica: 2
            - start: True
    '''
    ret = {'name': name, 'changes': {}, 'comment': '', 'result': False}

    if suc.check_name(name, 'a-zA-Z0-9._-'):
        ret['comment'] = 'Invalid characters in volume name.'
        return ret

    volumes = __salt__['glusterfs.list_volumes']()
    if name not in volumes:
        if __opts__['test']:
            comment = 'Volume {0} will be created'.format(name)
            if start:
                comment += ' and started'
            ret['comment'] = comment
            ret['result'] = None
            return ret

        vol_created = __salt__['glusterfs.create_volume'](
            name, bricks, stripe, replica, device_vg, transport, start, force)
        if not vol_created:
            ret['comment'] = 'Creation of volume {0} failed'.format(name)
            return ret
        old_volumes = volumes
        volumes = __salt__['glusterfs.list_volumes']()
        if name in volumes:
            ret['changes'] = {'new': volumes, 'old': old_volumes}
            ret['comment'] = 'Volume {0} is created'.format(name)
    else:
        ret['comment'] = 'Volume {0} already exists'.format(name)

    if start:
        if __opts__['test']:
            # volume already exists
            ret['comment'] = ret['comment'] + ' and will be started'
            ret['result'] = None
            return ret
        # glusterfs.info reports status == 1 for a started volume.
        if int(__salt__['glusterfs.info']()[name]['status']) == 1:
            ret['result'] = True
            ret['comment'] = ret['comment'] + ' and is started'
        else:
            vol_started = __salt__['glusterfs.start_volume'](name)
            if vol_started:
                ret['result'] = True
                ret['comment'] = ret['comment'] + ' and is now started'
                if not ret['changes']:
                    ret['changes'] = {'new': 'started', 'old': 'stopped'}
            else:
                # Repaired: this message was split by a stray raw newline
                # inside the string literal in the original source, which
                # is a syntax error.
                ret['comment'] = ret['comment'] + \
                    ' but failed to start. Check logs for further information'
                return ret

    if __opts__['test']:
        ret['result'] = None
    else:
        ret['result'] = True
    return ret
def peered(name):
    """
    Check if node is peered.

    name
        The remote host with which to peer.

    .. code-block:: yaml

        peer-cluster:
          glusterfs.peered:
            - name: two

        peer-clusters:
          glusterfs.peered:
            - names:
              - one
              - two
              - three
              - four
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": False}

    # NOTE(review): assumes suc.check_name raises SaltCloudException on
    # invalid input -- confirm, since other code in this file uses its
    # truthy return value as the failure signal instead.
    try:
        suc.check_name(name, "a-zA-Z0-9._-")
    except SaltCloudException:
        ret["comment"] = "Invalid characters in peer name."
        return ret

    # Check if the name resolves to one of this minion IP addresses
    name_ips = salt.utils.network.host_to_ips(name)
    if name_ips is not None:
        # if it is None, it means resolution fails, let's not hide
        # it from the user.
        this_ips = set(salt.utils.network.ip_addrs())
        this_ips.update(salt.utils.network.ip_addrs6())
        if this_ips.intersection(name_ips):
            # The name is one of our own addresses: nothing to probe.
            ret["result"] = True
            ret["comment"] = "Peering with localhost is not needed"
            return ret

    peers = __salt__["glusterfs.peer_status"]()

    # peer_status maps peer ids to info dicts with a 'hostnames' list.
    if peers and any(name in v["hostnames"] for v in peers.values()):
        ret["result"] = True
        ret["comment"] = "Host {} already peered".format(name)
        return ret

    # In test mode, report the pending change without probing.
    if __opts__["test"]:
        ret["comment"] = "Peer {} will be added.".format(name)
        ret["result"] = None
        return ret

    if not __salt__["glusterfs.peer"](name):
        ret["comment"] = "Failed to peer with {}, please check logs for errors".format(
            name
        )
        return ret

    # Double check that the action succeeded
    newpeers = __salt__["glusterfs.peer_status"]()
    if newpeers and any(name in v["hostnames"] for v in newpeers.values()):
        ret["result"] = True
        ret["comment"] = "Host {} successfully peered".format(name)
        ret["changes"] = {"new": newpeers, "old": peers}
    else:
        # Probe reported success but the peer never showed up; result
        # stays False so the discrepancy is visible.
        ret[
            "comment"
        ] = "Host {} was successfully peered but did not appear in the list of peers".format(
            name
        )
    return ret
def volume_present(
    name,
    bricks,
    stripe=False,
    replica=False,
    device_vg=False,
    transport="tcp",
    start=False,
    force=False,
    arbiter=False,
):
    """
    Ensure that the volume exists

    name
        name of the volume

    bricks
        list of brick paths

    replica
        replica count for volume

    arbiter
        use every third brick as arbiter (metadata only)

        .. versionadded:: 2019.2.0

    start
        ensure that the volume is also started

    .. code-block:: yaml

        myvolume:
          glusterfs.volume_present:
            - bricks:
                - host1:/srv/gluster/drive1
                - host2:/srv/gluster/drive2

        Replicated Volume:
          glusterfs.volume_present:
            - name: volume2
            - bricks:
              - host1:/srv/gluster/drive2
              - host2:/srv/gluster/drive3
            - replica: 2
            - start: True

        Replicated Volume with arbiter brick:
          glusterfs.volume_present:
            - name: volume3
            - bricks:
              - host1:/srv/gluster/drive2
              - host2:/srv/gluster/drive3
              - host3:/srv/gluster/drive4
            - replica: 3
            - arbiter: True
            - start: True
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": False}

    # Validate before touching gluster at all.
    if suc.check_name(name, "a-zA-Z0-9._-"):
        ret["comment"] = "Invalid characters in volume name."
        return ret

    volumes = __salt__["glusterfs.list_volumes"]()
    if name not in volumes:
        if __opts__["test"]:
            comment = "Volume {} will be created".format(name)
            if start:
                comment += " and started"
            ret["comment"] = comment
            ret["result"] = None
            return ret

        vol_created = __salt__["glusterfs.create_volume"](
            name, bricks, stripe, replica, device_vg, transport, start, force, arbiter
        )

        if not vol_created:
            ret["comment"] = "Creation of volume {} failed".format(name)
            return ret
        old_volumes = volumes
        volumes = __salt__["glusterfs.list_volumes"]()
        if name in volumes:
            ret["changes"] = {"new": volumes, "old": old_volumes}
            ret["comment"] = "Volume {} is created".format(name)

    else:
        ret["comment"] = "Volume {} already exists".format(name)

    if start:
        if __opts__["test"]:
            # volume already exists
            ret["comment"] = ret["comment"] + " and will be started"
            ret["result"] = None
            return ret

        # glusterfs.info reports status == 1 when the volume is started.
        if int(__salt__["glusterfs.info"]()[name]["status"]) == 1:
            ret["result"] = True
            ret["comment"] = ret["comment"] + " and is started"
        else:
            vol_started = __salt__["glusterfs.start_volume"](name)
            if vol_started:
                ret["result"] = True
                ret["comment"] = ret["comment"] + " and is now started"
                if not ret["changes"]:
                    ret["changes"] = {"new": "started", "old": "stopped"}
            else:
                ret["comment"] = (
                    ret["comment"]
                    + " but failed to start. Check logs for further information"
                )
                return ret

    if __opts__["test"]:
        ret["result"] = None
    else:
        ret["result"] = True
    return ret
def peered(name):
    '''
    Check if node is peered.

    name
        The remote host with which to peer.

    .. code-block:: yaml

        peer-cluster:
          glusterfs.peered:
            - name: two

        peer-clusters:
          glusterfs.peered:
            - names:
              - one
              - two
              - three
              - four
    '''
    ret = {'name': name, 'changes': {}, 'comment': '', 'result': False}

    # NOTE(review): assumes suc.check_name raises SaltCloudException on
    # invalid input -- other variants treat its return value as the signal.
    try:
        suc.check_name(name, 'a-zA-Z0-9._-')
    except SaltCloudException:
        ret['comment'] = 'Invalid characters in peer name.'
        return ret

    # Check if the name resolves to localhost
    try:
        if socket.gethostbyname(name) in __salt__['network.ip_addrs']():
            ret['result'] = True
            ret['comment'] = 'Peering with localhost is not needed'
            return ret
    except socket.gaierror:
        # Bug fix: an unresolvable name used to raise out of the state;
        # continue and let the probe below report the real failure.
        pass

    peers = __salt__['glusterfs.peer_status']()

    if peers and any(name in v['hostnames'] for v in peers.values()):
        ret['result'] = True
        ret['comment'] = 'Host {0} already peered'.format(name)
        return ret

    if __opts__['test']:
        ret['comment'] = 'Peer {0} will be added.'.format(name)
        ret['result'] = None
        return ret

    peered = __salt__['glusterfs.peer'](name)
    if not peered:
        ret['comment'] = 'Failed to peer with {0}, please check logs for errors'.format(
            name)
        return ret

    # Double check that the action succeeded
    newpeers = __salt__['glusterfs.peer_status']()
    if newpeers and any(name in v['hostnames'] for v in newpeers.values()):
        ret['result'] = True
        ret['comment'] = 'Host {0} successfully peered'.format(name)
        ret['changes'] = {'new': newpeers, 'old': peers}
    else:
        ret['comment'] = 'Host {0} was successfully peered but did not appear in the list of peers'.format(
            name)
    return ret
def volume_present(name, bricks, stripe=False, replica=False, device_vg=False,
                   transport='tcp', start=False, force=False):
    '''
    Ensure that the volume exists

    name
        name of the volume

    bricks
        list of brick paths

    start
        ensure that the volume is also started

    .. code-block:: yaml

        myvolume:
          glusterfs.volume_present:
            - bricks:
                - host1:/srv/gluster/drive1
                - host2:/srv/gluster/drive2

        Replicated Volume:
          glusterfs.volume_present:
            - name: volume2
            - bricks:
              - host1:/srv/gluster/drive2
              - host2:/srv/gluster/drive3
            - replica: 2
            - start: True
    '''
    # Doc fix: the examples above previously showed 'glusterfs.created'
    # even though this state is 'volume_present'.
    ret = {'name': name, 'changes': {}, 'comment': '', 'result': False}

    if suc.check_name(name, 'a-zA-Z0-9._-'):
        ret['comment'] = 'Invalid characters in volume name.'
        return ret

    volumes = __salt__['glusterfs.list_volumes']()
    if name not in volumes:
        if __opts__['test']:
            comment = 'Volume {0} will be created'.format(name)
            if start:
                comment += ' and started'
            ret['comment'] = comment
            ret['result'] = None
            return ret

        vol_created = __salt__['glusterfs.create_volume'](
            name, bricks, stripe, replica, device_vg, transport, start, force)
        if not vol_created:
            ret['comment'] = 'Creation of volume {0} failed'.format(name)
            return ret
        old_volumes = volumes
        volumes = __salt__['glusterfs.list_volumes']()
        if name in volumes:
            ret['changes'] = {'new': volumes, 'old': old_volumes}
            ret['comment'] = 'Volume {0} is created'.format(name)
    else:
        ret['comment'] = 'Volume {0} already exists'.format(name)

    if start:
        if __opts__['test']:
            # volume already exists
            ret['comment'] = ret['comment'] + ' and will be started'
            ret['result'] = None
            return ret
        # glusterfs.info reports status == 1 for a started volume.
        if int(__salt__['glusterfs.info']()[name]['status']) == 1:
            ret['result'] = True
            ret['comment'] = ret['comment'] + ' and is started'
        else:
            vol_started = __salt__['glusterfs.start_volume'](name)
            if vol_started:
                ret['result'] = True
                ret['comment'] = ret['comment'] + ' and is now started'
                if not ret['changes']:
                    ret['changes'] = {'new': 'started', 'old': 'stopped'}
            else:
                # Repaired: this message was split by a stray raw newline
                # inside the string literal in the original source, which
                # is a syntax error.
                ret['comment'] = ret['comment'] + \
                    ' but failed to start. Check logs for further information'
                return ret

    if __opts__['test']:
        ret['result'] = None
    else:
        ret['result'] = True
    return ret
def peered(name):
    '''
    Check if node is peered.

    name
        The remote host with which to peer.

    .. code-block:: yaml

        peer-cluster:
          glusterfs.peered:
            - name: two

        peer-clusters:
          glusterfs.peered:
            - names:
              - one
              - two
              - three
              - four
    '''
    ret = {'name': name, 'changes': {}, 'comment': '', 'result': False}

    # NOTE(review): assumes suc.check_name raises SaltCloudException on
    # invalid input -- other variants treat its return value as the signal.
    try:
        suc.check_name(name, 'a-zA-Z0-9._-')
    except SaltCloudException:
        ret['comment'] = 'Invalid characters in peer name.'
        ret['result'] = False
        return ret

    peers = __salt__['glusterfs.list_peers']()

    if peers:
        # peers may map hostnames to alias lists; match either form.
        if name in peers or any([name in peers[x] for x in peers]):
            ret['result'] = True
            ret['comment'] = 'Host {0} already peered'.format(name)
            return ret

    result = __salt__['glusterfs.peer'](name)
    ret['comment'] = ''
    if 'exitval' in result:
        # Bug fix: the old '<=' comparison permitted an exitval equal to
        # len(RESULT_CODES), which raised IndexError on the lookup below.
        if int(result['exitval']) < len(RESULT_CODES):
            ret['comment'] = RESULT_CODES[int(result['exitval'])].format(name)
    else:
        if 'comment' in result:
            ret['comment'] = result['comment']

    newpeers = __salt__['glusterfs.list_peers']()
    # if newpeers was null, we know something didn't work.
    if newpeers and name in newpeers or newpeers and any([name in newpeers[x] for x in newpeers]):
        ret['result'] = True
        ret['changes'] = {'new': newpeers, 'old': peers}
    # In case the hostname doesn't have any periods in it
    elif name == socket.gethostname():
        ret['result'] = True
        return ret
    # In case they have a hostname like "example.com"
    elif name == socket.gethostname().split('.')[0]:
        ret['result'] = True
        return ret
    elif 'on localhost not needed' in ret['comment']:
        ret['result'] = True
        ret['comment'] = 'Peering with localhost is not needed'
    else:
        ret['result'] = False
    return ret
def create(name, peers=None, brick='/srv/gluster/brick1', replica=False,
           count=2, **kwargs):
    '''
    Create a glusterfs volume.

    name
        name of the gluster volume

    brick
        filesystem path for the brick

    peers
        peers that will be part of the cluster

    replica
        replicated or distributed cluster

    count
        number of nodes per replica block

    short
        (optional) use short names for peering

    CLI Example:

    .. code-block:: bash

        salt 'one.gluster*' glusterfs.create mymount /srv/ \
        peers='["one", "two"]'

        salt -G 'gluster:master' glusterfs.create mymount /srv/gluster/brick1 \
        peers='["one", "two", "three", "four"]' replica=True count=2 \
        short=True start=True
    '''
    # Bug fix: the default peers=None was iterated below, raising
    # TypeError whenever peers was not supplied.
    peers = peers or []

    check_peers = 'gluster peer status | awk \'/Hostname/ {print $2}\''
    active_peers = __salt__['cmd.run'](check_peers).splitlines()
    hostname = socket.gethostname()
    if 'short' in kwargs and kwargs['short']:
        hostname = hostname.split('.')[0]
    # Every requested peer (other than ourselves) must already be probed.
    if not all([act_peer in active_peers
                for act_peer in peers
                if act_peer != hostname]):
        return 'Not all peers have been probed.'

    if not os.path.exists(brick):
        return 'Brick path doesn\'t exist.'

    if suc.check_name(name, 'a-zA-Z0-9._-'):
        return 'Invalid characters in volume name'

    if any([suc.check_name(act_peer, 'a-zA-Z0-9._-') for act_peer in peers]):
        return 'Invalid characters in a peer name.'

    cmd = 'gluster volume create {0} '.format(name)
    if replica:
        cmd += 'replica {0} '.format(count)
    for act_peer in peers:
        cmd += '{0}:{1} '.format(act_peer, brick)

    # Lazy %-formatting: the message is only built when debug is enabled.
    log.debug('Clustering command:\n%s', cmd)
    ret = __salt__['cmd.run'](cmd)

    if 'start' in kwargs and kwargs['start']:
        ret = __salt__['cmd.run']('gluster volume start {0}'.format(name))
    return ret