Example #1
0
def install_gluster(node):
    """Install the glusterfs 3.5 package set on *node*, unless already present."""
    log.info('Installing gluster packages.')
    version_output = node.ssh.execute('gluster --version', silent=True,
                                      ignore_exit_status=True,
                                      log_output=False)[0]
    if 'glusterfs 3.5' not in version_output:
        # NOTE(review): this overwrites /etc/apt/sources.list with content
        # fetched from pastebin over plain HTTP -- an untrusted source; verify
        # whether this is still needed given the PPA added below.
        execute(node, 'wget "http://pastebin.com/raw.php?i=uzhrtg5M" -O /etc/apt/sources.list')
        node.ssh.execute('sudo add-apt-repository ppa:gluster/glusterfs-3.5 -y')
        apt_update(node, checkfirst=False)
        node.apt_install('glusterfs-server glusterfs-client software-properties-common xfsprogs attr openssh-server')
    else:
        log.info('Gluster already installed, skipping')
Example #2
0
def setup_bricks(node):
    """Create a gluster brick (mounted under /exports) for each drive found on *node*."""
    log.info('Partitioning and formatting ephemeral drives.')
    # NOTE(review): listing /dev/xvda* to discover ephemeral drives looks
    # questionable (xvda is usually the root device) -- TODO confirm there is
    # no better way to enumerate ephemeral devices.
    devices = execute(node, 'ls /dev/xvda*', ignore_exit_status=True)
    log.info("Gathering devices for bricks: %s" % ', '.join(devices))

    for index, device in enumerate(devices):
        export_path = os.path.join('/exports', 'brick%s' % index)
        gluster.mount_brick(node, device, export_path)
Example #3
0
 def unmount(self, node, device, skipif=None):
     """
     Unmount *device* on *node* if it is currently mounted.

     :param node: node whose mount table is inspected and modified.
     :param device: (str) path to the device.
     :param skipif: (str) if device is mounted at this path, do not unmount.
     """
     mount_map = node.get_mount_map()
     # only unmount mounted devices, and honour the skipif mount point
     # (the old "todo implement skipif" comment was stale -- it is implemented)
     if device in mount_map and mount_map[device] != skipif:
         log.info('Unmounting {0}'.format(device))
         node.ssh.execute('umount {0}'.format(device))
Example #4
0
 def format_device(self, node, device):
     """
     Format *device* with XFS unless it already carries an XFS filesystem.

     :param node: the node.
     :param device: path to the device.
     """
     file_output = node.ssh.execute('file -s {0}'.format(device))[0]
     if re.search("XFS filesystem", file_output):
         log.info('{0} already formatted, skipping'.format(device))
     else:
         # make sure nothing holds the device before running mkfs
         self.unmount(node, device)
         node.ssh.execute('mkfs.xfs {0} -f'.format(device))
Example #5
0
    def mount_volume(self, node, volume, mountpoint):
        """
        Mount a gluster volume on a node.

        :param node: the node to mount on
        :param volume: the name of the volume
        :param mountpoint: directory to mount the volume at; created if missing
            (the old docstring documented a nonexistent ``path`` parameter).
        """
        # make sure the mount point exists before calling mount
        if not node.ssh.path_exists(mountpoint):
            log.info("Creating mount point %s" % mountpoint)
            node.ssh.execute("mkdir -p %s" % mountpoint)

        node.ssh.execute('mount -t glusterfs master:%s %s' % (volume, mountpoint))
Example #6
0
    def run2(self):
        """Install gluster on the master, build bricks from the ephemeral
        drives, create volume gv0, and mount it on every node."""
        master = self.master

        utils.apt_update(master)
        log.info('Installing gluster packages.')
        master.ssh.execute('add-apt-repository ppa:semiosis/ubuntu-glusterfs-3.4 -y')
        utils.apt_update(master, checkfirst=False)
        master.apt_install('openssh-server glusterfs-server glusterfs-client xfsprogs attr')

        log.info('Partitioning and formatting ephemeral drives.')

        # ephemeral drives = every device that is not an EBS-backed device
        ebs_devices = [d.replace('sd', 'xvd')
                       for d in master.block_device_mapping.keys()]
        ephemeral_devices = [d for d in master.get_device_map().keys()
                             if d not in ebs_devices]

        log.info("Gathering devices for bricks: {0}".format(', '.join(ephemeral_devices)))
        for brick_number, device in enumerate(ephemeral_devices):
            export_path = os.path.join('/exports', 'brick{0}'.format(brick_number))
            self.pool.simple_job(self.gluster.add_brick,
                                 (master, device, export_path), jobid=device)
        self.pool.wait(len(self.gluster.device2export_path))

        log.info('Creating and starting gluster volume gv0.')
        # if the server restarts, the bricks would need:
        #        setfattr -x trusted.glusterfs.volume-id $brick_path
        #        setfattr -x trusted.gfid $brick_path
        #        rm -rf $brick_path/.glusterfs
        self.gluster.add_volume(master, 'gv0', self.stripe, self.replicate)

        # mount gluster on all nodes
        for node in self.nodes:
            self.on_add_node2(node)

        master.ssh.execute('mkdir -p /gluster/gv0/master_scratch && ln -s /gluster/gv0/master_scratch /scratch')
Example #7
0
    def run(self, nodes, master, user, user_shell, volumes):
        """
        Set up a gluster volume on the master and mount it on all nodes.

        Any pre-existing volume named VOLUME_NAME is stopped and deleted
        before the bricks and the volume are (re)created.
        """
        install_gluster(master)
        execute(master, 'service glusterfs-server restart')

        if gluster.volume_exists(master, VOLUME_NAME):
            log.info('volume %s exists, removing' % (VOLUME_NAME))
            execute(master, 'gluster volume stop %s --mode=script' % VOLUME_NAME)
            execute(master, 'gluster volume delete %s --mode=script' % VOLUME_NAME)

        setup_bricks(master)

        gluster.create_and_start_volume(master, VOLUME_NAME, self.stripe, self.replicate)
        gluster.mount_volume(master, VOLUME_NAME, '/gluster/%s' % VOLUME_NAME)

        # use VOLUME_NAME rather than a hard-coded 'gv0' so the scratch dir
        # stays consistent with the volume actually mounted above
        execute(master, 'mkdir -p /gluster/%s/master_scratch' % VOLUME_NAME)
        execute(master, 'ln -f -s /gluster/%s/master_scratch /scratch' % VOLUME_NAME)

        for node in nodes:
            self.on_add_node(node, nodes, master, user, user_shell, volumes)
Example #8
0
 def on_add_node2(self, node):
     """Install the gluster client on *node* and mount volume gv0 there."""
     log.info('Installing glusterfs-client')
     node.ssh.execute('add-apt-repository ppa:semiosis/ubuntu-glusterfs-3.4 -y')
     utils.apt_update(node, checkfirst=False)
     # every other apt_install call in this file passes package names only;
     # the stray '-y' here was being treated as a package name
     node.apt_install('glusterfs-client')
     self.gluster.mount_volume(node, 'gv0', '/gluster/gv0')
Example #9
0
    def run(self, nodes, master, user, user_shell, volumes):
        """Plugin entry point: record cluster state, then delegate to run2."""
        self.__configure__(nodes, master, user, user_shell, volumes)
        self.master = master
        log.info("Executing plugin {0}".format(self.__module__))
        # catchall wraps run2 so its failures are handled centrally
        catchall(self.run2)