Example #1
    def pullC(self):
        """
         This pull input image
        """

        logger.debug(self.dock_pull)
        if self.dock_pull:
            logger.info(
                "Trying to pull %s from docker repo:%s ... \n "
                "\t \t \tThis can take some time, please wait...",
                self.dock_image, self.repository)
            if self.repository:
                self.dock_image = self.repository + '/' + self.dock_image
            logger.debug("Repo+image:%s", self.dock_image)
            try:
                ret = self.dc.pull(self.dock_image)
                #logger.debug("%s", ret)
                if "404" in ret:
                    logger.error("Failed to pull %s from provided docker repo",
                                 self.dock_image)
                    return False
                else:
                    logger.info("Successfully pulled docker image:%s" %
                                (self.dock_image))
                    return True

            except Exception as e:
                logger.critical("Failed to pull %s with an exception: %s",
                                self.dock_image, e)
                return False

        else:
            logger.debug("Dont attempt to pull given image from docker repo")
            return True
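
A note on Example #1: it targets the legacy docker-py Client API, whose pull() returns raw status text that is then scanned for "404". A rough standalone sketch of the same pull-and-verify flow against the current Docker SDK for Python (the repository and image names below are illustrative, not taken from the original code):

import logging

import docker  # the current "docker" SDK package, successor of docker-py

logger = logging.getLogger(__name__)


def pull_image(repository, image, tag="latest"):
    """Pull repository/image:tag and return True on success."""
    name = repository + '/' + image if repository else image
    client = docker.from_env()
    try:
        client.images.pull(name, tag=tag)
        logger.info("Successfully pulled docker image:%s", name)
        return True
    except docker.errors.APIError as err:
        logger.error("Failed to pull %s: %s", name, err)
        return False
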
Example #2
    def pullC(self):
        """
         This pull input image
        """

        logger.debug(self.dock_pull)
        if self.dock_pull:
            logger.info("Trying to pull %s from docker repo:%s ... \n "
                         "\t \t \tThis can take some time, please wait...", self.dock_image, self.repository)
            if self.repository:
                self.dock_image=self.repository+'/'+self.dock_image
            logger.debug("Repo+image:%s", self.dock_image)
            try:
                ret = self.dc.pull(self.dock_image)
                #logger.debug("%s", ret)
                if "404" in ret:
                    logger.error("Failed to pull %s from provided docker repo", self.dock_image)
                    return False
                else:
                    logger.info("Successfully pulled docker image:%s" % (self.dock_image))
                    return True

            except Exception as e:
                logger.critical("Failed to pull %s with an exception: %s", self.dock_image, e)
                return False

        else:
            logger.debug("Dont attempt to pull given image from docker repo")
            return True
Example #3
    def gluster_install(self, version):
        failed_package_nodes=[]
        failed_install_nodes=[]


        nodes = run_helper.get_nodes_ip()
        logger.info("Trying to install gluster on %s nodes ", nodes)


        gluster_package_command = 'yum -y install python-devel python-setuptools gcc deltarpm yum-utils git \
                   autoconf automake bison dos2unix flex glib2-devel \
                   libaio-devel libattr-devel libibverbs-devel \
                   librdmacm-devel libtool libxml2-devel make openssl-devel \
                   pkgconfig python-devel python-eventlet python-netifaces \
                   python-paste-deploy python-simplejson python-sphinx \
                   python-webob pyxattr readline-devel rpm-build gdb dbench \
                   net-tools systemtap-sdt-devel attr psmisc findutils which \
                   xfsprogs yajl-devel lvm2-devel e2fsprogs mock nfs-utils \
                   openssh-server supervisor openssl fuse-libs wget >/dev/null'
        #gluster_package_command='ls'
        #gluster_install_command = 'cd /root/glusterfs && make install'
        gluster_install_command = "rm -rf /root/glusterfs && cd /root && git clone git://review.gluster.org/glusterfs && cd glusterfs && \
                                 git checkout -b %s origin/release-%s  && ./autogen.sh >/dev/null && ./configure>/dev/null && make >/dev/null && make install> /dev/null " %(version, version)


        for node in nodes:
            flag = flag1 = status1 = status2 = 0
            logger.info("Configuring/installing on node:%s", node)
            status1 = run_helper.run_command(node, gluster_package_command, True)
            #time.sleep(20)

            if status1:
                logger.error('Required Gluster package installation failed on node: %s' , node)
                failed_package_nodes.append(node)
                flag = 1
            else:
                logger.info("Continuing ..")
                status2 = run_helper.run_command(node, gluster_install_command, True)
                time.sleep(20)
                if status2:
                    logger.error("Failed to configure GlusterFs from source repository ")
                    failed_install_nodes.append(node)
                    flag1 = 1

                else:
                    logger.info("Successfully configured GlusterFS binary on node:%s", node)

        if failed_package_nodes or failed_install_nodes:
            logger.critical("Failed to install gluster packages on:%s or GlusterFs binary installation failed on :%s ", failed_package_nodes, failed_install_nodes)
        else:
            logger.info("Successful Gluster Package Installation and GlusterFS Binary installation on all the nodes!")
       # if status1 == 0 and status2 == 0:
       #         logger.info("Everything went fine")
        return
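
run_helper.run_command() is not part of this listing; judging by how it is used above, it runs a shell command on a remote node and returns a non-zero exit status on failure. A purely hypothetical stand-in, assuming password-less ssh access as root to every node:

import subprocess


def run_command(node, command, verbose=False):
    """Run command on node over ssh; return its exit status (0 = success)."""
    proc = subprocess.Popen(["ssh", "root@" + node, command],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if verbose and out:
        print(out)
    return proc.returncode
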
Example #4
    def __init__(self, *args, **kwargs):

        for k, w in kwargs.items():
            logger.debug("%s :%s" % (k, w))
            self.proc = w
            if w == "docker":
                if os.path.exists("/usr/bin/systemctl"):
                    self.cmd = 'systemctl start docker'
                else:
                    self.cmd = 'docker -d'
                    logger.debug(self.cmd)
            else:
                logger.error("Unknown process %s ..exiting" % (w))
                self.cmd = 'exit 1'
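
This constructor is used in the main() example further down in this listing: the keyword argument names the process to manage, and the start command is chosen depending on whether /usr/bin/systemctl exists on the host.

procd = Procstart(process="docker")   # self.cmd becomes 'systemctl start docker' or 'docker -d'
if not procd.checkproc():             # checkproc()/execproc() as used in the main() example
    procd.execproc()
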
Example #5
    def connectD(self):
        try:
            # note: "version" can play a role in functionality
            self.dc = docker.Client(base_url=DOCK_SOCK,
                                    version=DOCK_VERSION,
                                    timeout=30)
            if self.dc:

                dcinfo = self.dc.info()
                logger.debug("Docker information \n %s", dcinfo)
            else:

                logger.critical("Failed to get docker info:")

        except Exception as e:
            logger.error(e)
            sys.exit(1)

        return self.dc
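
For comparison, the same connect-and-inspect check written against the current Docker SDK. This is only a sketch: DOCK_SOCK and DOCK_VERSION are module constants that are not shown in this listing, so the socket URL below is an assumption.

import sys

import docker


def connect_docker():
    try:
        client = docker.DockerClient(base_url="unix://var/run/docker.sock",
                                     timeout=30)
        print(client.info())      # rough equivalent of dc.info()
        return client
    except docker.errors.DockerException as err:
        print("Failed to talk to the docker daemon: %s" % err)
        sys.exit(1)
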
Example #6
    def connectD(self):
        try:
            # note: "version" can play a role in functionality
            self.dc = docker.Client(base_url=DOCK_SOCK,
                                    version=DOCK_VERSION,
                                    timeout=30)
            if self.dc:

                dcinfo = self.dc.info()
                logger.debug("Docker information \n %s", dcinfo)
            else:

                logger.critical("Failed to get docker info:")

        except Exception as e:
            logger.error(e)
            sys.exit(1)


        return self.dc
Example #7
    def runC(self, image_tag, gl_flag, gluster_dict={}):
        """
        Run the container
        Creates a container that can then be `start`ed.
        Parameters are similar to those for the `docker run` command
        except it doesn't support the attach options (`-a`). See "Port bindings" and "Using volumes"  for
        more information on how to create port bindings and volume mappings.
        """

        #self.command="exec >/dev/tty 2>/dev/tty </dev/tty && /usr/bin/screen -s /bin/bash"

        self.c_tag = image_tag
        self.container_id = ""
        self.info_array = ["Hostname", "NetworkSettings"]
        #todo: For now, dont enable dock_command .
        #self.dock_command=''
        self.brick_ext = 0
        self.gflag=gl_flag
        self.brick_set=[]
        logger.info(" Create and start containers with image :%s ", self.c_tag)

        if self.gflag:
            bricks =  gluster_dict['BRICKS']
            self.brick_set = []
            for b in bricks.split(','):
                self.brick_set.append(b)

            logger.info("Bricks will be using in order:%s", self.brick_set)

        logging.info( "Enable Gluster Volume :%s" ,self.gflag)
        try:
            #docker run -i -t ubuntu /bin/bash

            for num in range(0,self.dock_numc):
                #todo: If we just want to enable auto dir creation inside same directory


                # self_note: with tty=False the containers exited as soon as
                # they were created or had executed the provided command;
                # with tty=True the containers stay up and running and you can attach.
                # Also set stdin_open=True for the `docker attach` command;
                # Ctrl+P+Q detaches from the container.
                # detach=False means it won't exit the shell?

                if self.gflag:
                    self.brick_mount=gluster_dict['SERVER_EXPORT_DIR']
                    if len(self.brick_set ) < self.dock_numc:

                        logger.critical("Number of bricks given to me is less than number of nodes,  check configfile")
                        return False
                    else:
                        print "..."

                    self.dock_command =''
                    self.container_id = self.dc.create_container(
                                                    self.c_tag, command=self.dock_command, hostname=None, user=None,
                                                    detach=True, stdin_open=True, tty=True, mem_limit=0,
                                                    ports=[22, 80], environment=None, dns=None, volumes=[self.brick_mount],
                                                    volumes_from=None, network_disabled=False, name=None,
                                                    entrypoint=None, cpu_shares=None, working_dir=None)

                else:
                    #self.dock_command ='/bin/bash'
                    self.container_id = self.dc.create_container(
                                                    self.c_tag, command=self.dock_command, hostname=None, user=None,
                                                    detach=True, stdin_open=True, tty=True, mem_limit=0,
                                                    ports=[22, 80], environment=None, dns=None, volumes=None,
                                                    volumes_from=None, network_disabled=False, name=None,
                                                    entrypoint=None, cpu_shares=None, working_dir=None)

                self.cons_ids.append(self.container_id)
            logger.debug("Container Ids : %s" ,self.cons_ids)
            if not self.cons_ids:
                logger.critical( "Failed when creating Containers")
                return False

            for ids in self.cons_ids:
                try:
                    if self.gflag:
                        #For legacy reasons :), keeping below comments.
                        #self.brick_ext += 1
                        #self.brick_mount = '/rhs_bricks/brick'+str(self.brick_ext)
                        self.brick_source = self.brick_set[self.brick_ext]
                        self.brick_ext += 1

                    # TODO : look at other options
                    #mostly need to link these containers using link option
                    #http://blog.docker.io/2013/10/docker-0-6-5-links-container-naming-advanced-port-redirects-host-integration/
                    #regarding lxc_conf you can give me in dict formats
                    #also it helps when changing to static ips .etc
                    #-n flag looks similar:
                    #https://github.com/dotcloud/docker-py/issues/79

                        ret = self.dc.start(
                                            ids, binds={self.brick_source:self.brick_mount}, port_bindings={22: None, 80: None},
                                            lxc_conf=None,publish_all_ports=False, links=None, privileged=True)
                    else:

                        ret = self.dc.start(ids, binds=None, port_bindings={22: None, 80: None}, lxc_conf=None,
                                            publish_all_ports=False, links=None, privileged=True)
                    logger.debug("Container with ID :%s is started", ids)
                    time.sleep(10)

                    # TODO: maybe I need to commit these containers later with a different workflow.
                except Exception as e:
                    logger.critical("Exception raised when starting Container with id:%s", ids)

                    logger.error(e)
                    return False


            #TODO: This may not be the right place to put list containers
            #and capturing the requested information
            #logger.debug "Going to list the containers"
            #cons =  self.dc.containers(quiet=False, all=False, trunc=True, latest=False, since=None,
            #                   before=None, limit=-1)
            #for c in cons:
            #   self.c_id =  dict(cons[0])['Id']
            #   self.cons_ids.append( self.c_id)

            logger.info("  Information about running containers ")

            for ids in self.cons_ids:
                try:
                    insp_obj = self.dc.inspect_container(ids)
                    hostname =insp_obj['Config']['Hostname']
                    ipaddr = insp_obj['NetworkSettings']['IPAddress']
                    if not ipaddr :
                        logger.critical("Not able to get IP address of %s", hostname)

                    self.container_ips.append(insp_obj['NetworkSettings']['IPAddress'])
                except Exception as e:
                    logger.critical('Exception raised when inspecting Containers')
                    logger.debug(e)

                    return False

        except Exception as e:

            logger.critical('Exception raised when creating/starting Containers')
            logger.debug(e)
            return False

        return True
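
The create_container()/start() pair in runC() is the old low-level docker-py API. With the current SDK the same effect, a detached, tty-enabled, privileged container with ports 22 and 80 published to random host ports and one bind-mounted brick, can be sketched roughly as below; the image name and brick paths are illustrative only.

import docker

client = docker.from_env()
container = client.containers.run(
    "fedora:latest",                          # image:tag, as resolved by runC's caller
    detach=True,
    tty=True,
    stdin_open=True,
    privileged=True,
    ports={"22/tcp": None, "80/tcp": None},   # None = publish to a random host port
    volumes={"/bricks/brick1": {"bind": "/rhs_bricks", "mode": "rw"}},
)
container.reload()                            # refresh attrs so the IP address is populated
print(container.id)
print(container.attrs["NetworkSettings"]["IPAddress"])
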
Example #8
def main(dryr=0, dockit_log=dockit_log_file):

    parser = OptionParser()
    parser = add_options(parser)


    options, arguments = parser.parse_args()
    globalopts = dict(options.__dict__)

    pull_option_args = ['image', 'dockerrepo']
    build_option_args = ['dockerfile', 'imgtag']
    start_option_args = ['image', 'imgtag', 'count']
    # gluster_optins_args = ['gluvolume', 'gluinst']

    anyopt = [options.pullimg, options.buildimg, options.startc,
              options.dry]
    anyopt_dict = {'pullimg': pull_option_args, 'buildimg':
                   build_option_args, 'startc': start_option_args}

    check = [o for o in anyopt if o]
    if not check:
        logging.error("You missed one of the must required option..  "
                      "reread and execute.... exiting .")
        print_menu()
        sys.exit(1)
    if options.gluinst or options.gluvolume:
        if not options.glumode:
            logger.error("You can not use gluster actions without -g option")
            sys.exit(1)
    if options.glumode and not options.gluvolume and not options.gluinst:
        logger.warn("-g dont have any effect without --gv or --gi options")

    final_true_list = [
        [key, value] for key, value in globalopts.items()
        if value]
    logger.debug("Input \t :%s", final_true_list)
    final_list = []
    for it in final_true_list:
        for k, v in anyopt_dict.items():
            if k == it[0]:
                final_list.append(v)
    # print final_list
    my_good = list(set([item for sublist in final_list for item in sublist]))

    if options.startc and options.buildimg:
        my_good.remove('image')
        logger.debug("Required Parameters for your request:%s", my_good)

    if options.pullimg and options.startc:
        if options.imgtag is None:
            options.imgtag = 'latest'
            logger.debug("image tag : %s , docker repo:%s",
                         options.imgtag, options.dockerrepo)

    if options.pullimg and options.buildimg:
        logger.error("Only one at a time, pull or build")
        sys.exit(1)

    for good in my_good:
        if not options.__dict__[good]:
            logger.error("\n \t Unfortunately  You Missed:%s", good)
            parser.print_help()
            sys.exit(1)

    logger.info(
        "Dockit starting.. Process logs are available at:%s", dockit_log_file)

    if options.count:
        options.count = int(options.count)

    if options.startc:
        options = process_startc_option(options)

    if options.dry:
        logger.info("Dry run : Dockit will not attempt to install any package")
        dryr = 1
    else:
        logger.debug("Install packages if required, this is not a dry run...")

    try:
        sysobj = Packageinst(dryr)
        if sysobj:
            sysobj.getsysinfo()
            ret = sysobj.checkprereq()

            if ret:
                logger.info("Success:Pre-requisites are installed")
            else:
                logger.debug("Either install it or let me install ")
                sys.exit(1)
            logger.debug("Going to check/start docker daemon")
            procd = Procstart(process="docker")
            checkret = procd.checkproc()
            if not checkret:
                ret = procd.execproc()
                if ret:
                    logger.info("Successfully started docker deamon... ")
                else:
                    logger.error('Exiting')
                    sys.exit(1)
            procd.infoproc()
            logger.debug("Connecting to the docker deamon")

            talktoDocker(
                options.pullimg, options.image, options.imgtag,
                options.count, options.dockerfile, options.dockerrepo,
                options.buildimg, options.startc, options.glumode,
                options.gluinst, options.gluvolume)

    except Exception as e:
        logger.debug(e)
        sys.exit(1)
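
add_options() is not included in this example; Example #14 further down spells out the same parser.add_option() calls inline. A trimmed sketch of what it presumably wraps:

from optparse import OptionParser


def add_options(parser):
    parser.add_option("-p", "--pullimage", action="store_true",
                      dest="pullimg", default=False,
                      help="Pull the image from the docker repo")
    parser.add_option("-i", "--image", dest="image", metavar="IMAGE",
                      help="Image name - containers will be based on this image")
    parser.add_option("-n", "--count", dest="count", metavar="COUNT",
                      help="Number of containers to start")
    # ...the remaining options mirror Example #14 below
    return parser
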
Example #9
def process_startc_option(options):
    prefer = raw_input("Do you want to continue (y/n)")

    if prefer != 'y':
        logger.debug("Exiting ")
        sys.exit(1)

    logger.info("Proceeding ")
    if options.glumode:
        if options.gluinst:
            logger.info("Need to install gluster inside containers")
            gluster_config['GLUSTER_VERSION'] = options.gluinst

        if options.gluvolume:
            logger.info("\n Need to configure gluster volume..\n")

            g_voltype = ''
            if not options.configfile:
                g_voltype = raw_input(
                    "Gluster Volume Type (ex: 2x2x1, where distribute, "
                    "replica, stripe counts are given in that order)\t :")
                g_volname = raw_input(
                    "Gluster Volume Name (ex: glustervol)\t :")
                g_export = raw_input(
                    "Gluster Export Dir Name (ex: /rhs_bricks)\t :")
                g_brick_file = raw_input(
                    "Gluster brick file (ex: /home/configfile)\t :")
            else:
                logger.info(
                    "Reading gluster configuration from config file")
                print read_config_file(options.configfile)

            try:
                if g_voltype:
                    volumeconfig = re.search(
                        r'([0-9]+)x([0-9]+)x([0-9]+)', g_voltype)
                else:
                    gluster_config['VOL_TYPE'] = gluster_config.get(
                        'VOL_TYPE', '1x2x1')
                    gluster_config['VOLNAME'] = gluster_config.get(
                        'VOLNAME', 'defaultVol')
                    gluster_config['SERVER_EXPORT_DIR'] = gluster_config.get(
                        'SERVER_EXPORT_DIR', '/defaultExport')
                    volumeconfig = re.search(
                        r'([0-9]+)x([0-9]+)x([0-9]+)',
                        gluster_config['VOL_TYPE'])
                distributecount = volumeconfig.group(1)
                replicacount = volumeconfig.group(2)
                stripevcount = volumeconfig.group(3)
            except Exception as e:
                logger.debug(
                    "Error in parsing volume type string..exiting")
                logger.debug(e)
                sys.exit(1)

            if distributecount == '0':
                distributecount = 1
            if replicacount == '0':
                replicacount = 1
            if stripevcount == '0':
                stripevcount = 1

            options.count = int(distributecount) * \
                int(replicacount) * int(stripevcount)
            logger.info("No of gluster containers to spawn:%s",
                        options.count)
            prefer = raw_input("Do you want to continue (y/n):")
            if prefer == 'y':
                if not options.configfile:
                    gluster_config['VOLNAME'] = g_volname
                    gluster_config['VOL_TYPE'] = g_voltype
                    gluster_config['SERVER_EXPORT_DIR'] = g_export
                    gluster_config['BRICK_FILE'] = g_brick_file
                    # gluster_config['BRICKS'] =
                    # read_config_file_b(g_brick_file)
                    read_config_file_b(g_brick_file)
                else:
                    logger.info(
                        "Configuration read from configuration file")

                logger.info("%s", gluster_config)
            else:
                logger.error(
                    "Exiting.. Invoke dockit command with proper "
                    "option of gluster mode")
                sys.exit(1)
    else:
        logger.info("Run containers natively, no mode configured")
    prefer = ''
    return options
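
A worked instance of the NxNxN volume-type parsing used above: a volume type of 2x2x1 yields distribute=2, replica=2, stripe=1, so 2 * 2 * 1 = 4 containers are spawned.

import re

volumeconfig = re.search(r'([0-9]+)x([0-9]+)x([0-9]+)', '2x2x1')
distributecount, replicacount, stripevcount = volumeconfig.groups()
print(int(distributecount) * int(replicacount) * int(stripevcount))   # -> 4
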
Example #10
def talktoDocker(
        pulloption, baseimage, imagetag, numcontainers, dockerfile,
        dockerrepo, buildoption, startoption, gluster_mode,
        gluster_install, gluster_volume):

    new_image_tag = ''
    flag = flag1 = gluster_flag = 0
    cons_ids = []
    logger.debug(
        "Docker image name :%s \t Image Tag:%s \t number of Containers:%s",
        baseimage, imagetag, numcontainers)

    try:
        connret = dockit.DockerCli(
            "connect", pulloption, baseimage, imagetag, numcontainers,
            dockerfile, dockit_log_file, dockerrepo, buildoption)
        if connret:
            logger.info("Successfully connected to docker deamon: \n"
                        "\t \t \t pull/build/start containers accordingly.")

        else:
            logger.error("Connection return failed..exiting.")

            sys.exit(1)

        if pulloption:
            logger.debug("Proceeding with actions on Image:%s", baseimage)
            # if dockerrepo == None:
            #    logger.debug(
            #       "Base image pulling is not supported with "
            #       "this version of dockit \n"
            #       " please provide dockerrepo")
            #    sys.exit(1)
            pullret = connret.pullC()
            if pullret:
                logger.info("Done with pulling.. continuing")
                if dockerrepo and baseimage:
                    new_image_tag = dockerrepo+'/'+baseimage+':'+'latest'
                    flag1 = 1
                logger.debug("new_image_tag:%s", new_image_tag)
            else:
                logger.error("Error when pulling ")
        else:
            logger.info("Not trying to pull image:%s.. continuing", baseimage)
        if buildoption:
            logger.debug("Continuing build process with %s", dockerfile)

            built_image = connret.buildC()
            if built_image:
                logger.info(
                    " Image built from docker file :%s with id:%s and tag:%s",
                    built_image, built_image['Id'], built_image['RepoTags'])
                if imagetag:
                    logger.debug("Image tag:%s", imagetag)
                    new_image_tag = imagetag+':latest'
                    flag = 1
                logger.debug("new_image_tag:%s", new_image_tag)

            else:
                logger.error(
                    "Failed when building from docker file:\n"
                    "Check docker file path and options ")

        else:
            logger.debug("Not trying to build the image from docker file")

        if startoption:

            if flag or flag1:
                logger.debug("Flag:%s \t Flag1:%s image tag:\t %s",
                             flag, flag1, new_image_tag)

            else:
                if baseimage and imagetag:
                    new_image_tag = baseimage+':'+imagetag
                logger.debug("Using image tag :%s", new_image_tag)

            ret_exist = connret.image_by_tag(new_image_tag)

            if ret_exist:
                logger.debug("Image exists :%s with ID:%s  ",
                             ret_exist, ret_exist['Id'])
                logger.info("Going to run the containers")

                if gluster_mode:
                    if gluster_volume:
                        gluster_flag = 1
                    else:
                        gluster_flag = 0
                runret = connret.runC(
                    ret_exist['RepoTags'][0], gluster_flag, gluster_config, )
                if runret:
                    if not connret.container_ips:
                        logger.critical(
                            "Something went wrong when spawning "
                            "containers:exiting")
                        sys.exit(1)

                    logger.info(
                        "Containers are running successfully.."
                        "please login and work!!!!")
                    print (60 * '-')
                    logger.info("Details about running containers..\n")
                    logger.info(
                        "Container IPs \t : %s\n ", connret.container_ips)

                    for c in connret.cons_ids:
                        c_id = dict(c)['Id']
                        cons_ids.append(c_id)
                    logger.info("Container Ids \t : %s \n ", cons_ids)
                    print (60 * '-')
                    # todo : Its possible to auto login to these containers
                    # via below , commenting it out for now
                    # loginC(connret.container_ips, connret.cons_ids)
                    if gluster_mode:
                        gluster_cli = create_vol.glusteractions()

                        if gluster_cli:
                            logger.debug("Successfully created gluster client")
                            run_helper.rh_config_dict[
                                'SERVER_IP_ADDRS'] = connret.container_ips
                        else:
                            logger.error("Failed to create gluster client")
                        run_helper.con_pass = getpass.getpass()
                        if gluster_install:
                            ginst = gluster_config.get(
                                'GLUSTER_VERSION', '3.5')
                            if ginst:
                                gluster_cli.gluster_install(ginst)
                            else:
                                logger.debug(
                                    "Failed to get Gluster Version from dict.")
                        else:
                            logger.info("Gluster installation not required")
                        if gluster_volume:

                            run_helper.rh_config_dict[
                                'VOL_TYPE'] = gluster_config['VOL_TYPE']
                            run_helper.rh_config_dict['SERVER_EXPORT_DIR'] = \
                                gluster_config['SERVER_EXPORT_DIR']
                            run_helper.rh_config_dict['TRANS_TYPE'] = 'tcp'
                            run_helper.rh_config_dict[
                                'VOLNAME'] = gluster_config['VOLNAME']
                            logger.debug(
                                "Successfully filled configuration details:%s",
                                run_helper.rh_config_dict)
                            gluster_cli.create_gluster_volume(start=True)
                            logging.info(
                                'Gluster Volume operations done! '
                                'Please mount volume :%s in your client',
                                gluster_config['VOLNAME'])
                        else:
                            logger.debug(
                                "Gluster Volume creation not required")
                    else:
                        logger.info("Done!")
                else:
                    logger.error("Failed when starting/inspecting containers")
            else:
                logger.error(
                    "Image + tag does not exist.. "
                    "I cant start container from this..exiting")

                sys.exit(1)
        else:
            logger.debug("Not trying to start containers..")
            logger.info("Dockit finished...")
            return True

    except Exception as e:
        logger.critical("Failed on :%s", e)
        sys.exit(1)
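
The tag the containers are started from depends on which branch of talktoDocker() ran: a successful pull gives dockerrepo/baseimage:latest, a successful build gives imagetag:latest, and otherwise baseimage:imagetag is used. A condensed restatement with illustrative values (pulled/built stand in for the flag1/flag markers above):

dockerrepo, baseimage, imagetag = "myrepo", "fedora", "21"
pulled, built = True, False          # which of the branches above succeeded

if pulled and dockerrepo and baseimage:
    new_image_tag = dockerrepo + '/' + baseimage + ':' + 'latest'
elif built and imagetag:
    new_image_tag = imagetag + ':latest'
else:
    new_image_tag = baseimage + ':' + imagetag
print(new_image_tag)                 # -> myrepo/fedora:latest
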
Example #11
    def checkprereq(self):
        """
        This function will check for pre-req packages
        which need to be installed and if its not available
        it will be installed
        """

        rhelflag = 0
        if sysdict['dist'] == "fedora":
            req_pcks = fedora_req_pcks
            rhelflag = 1
        elif sysdict['dist'] == "redhat":
            if sysdict['ver'] < 7:
                req_pcks = rhel_req_pcks
            else:
                req_pcks = rhel7_req_pcks
            rhelflag = 1
        elif sysdict['dist'] == "centos":
            if sysdict['ver'] < 7:
                req_pcks = centos_req_pcks
            else:
                req_pcks = centos7_req_pcks
            rhelflag = 1
        elif sysdict['dist'] == "Ubuntu":
            req_pcks = ubuntu_req_pcks
        else:
            logger.error("Unknown Distribution.")
            sys.exit(1)

        logger.info(
            "Distribution:%s Required %s packages \n\t \t \t "
            "Making yum transactions", sysdict['dist'], req_pcks)

        if rhelflag == 1:
            try:
                import yum
            except Exception as e:
                print "Error when importing yum module"
                sys.exit(1)
            yb = yum.YumBase()
            yb.conf.cache = os.geteuid() != 0
            for pck in req_pcks:
                if yb.rpmdb.searchNevra(name=pck):
                    logger.info("%s -> Installed" % (pck))
                    avail_pcks.append(pck)
                else:
                    logger.info("%s -> not installed" % (pck))
                    mis_pcks.append(pck)
                    if not self.skipflag:
                        try:
                            if pck == "python-docker-py":
                                logger.debug("Trying with pip")
                                cmd = "sudo pip install {0} -U "
                                cmd += ">/dev/null".format("docker-py")
                                os.system(cmd)
                                mis_pcks.remove(pck)
                            else:
                                logger.info(
                                    "Unknown package for me to install "
                                    "via pip.. Proceeding")
                        except Exception as e:
                            logger.error(e)
                            logger.error(
                                "Error occurred when trying to install %s "
                                "using pip -> Try to install "
                                "manually" % (pck))
                            sys.exit(1)
                        try:
                            yb.install(name=pck)
                            time.sleep(5)
                        except yum.Errors.InstallError, err:
                            logger.error(
                                "exiting : Error when installing "
                                "package %s", pck)
                            logger.error("%s", (str(err)))
                            sys.exit(1)
                        except Exception as e:
                            logger.critical(e)
                            logger.error(
                                "Error occurred when trying to install %s "
                                "-> Try to install manually" % (pck))
                            sys.exit(1)
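
A minimal standalone version of the "is this RPM installed?" check that checkprereq() builds on (the yum Python API is Python 2 only; the package names are just examples):

import os

import yum

yb = yum.YumBase()
yb.conf.cache = os.geteuid() != 0    # use the yum cache when not running as root
for pck in ("docker-io", "git"):
    if yb.rpmdb.searchNevra(name=pck):
        print("%s -> installed" % pck)
    else:
        print("%s -> not installed" % pck)
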
Example #12
    def runC(self, image_tag, gl_flag, gluster_dict={}):
        """
        Run the container
        Creates a container that can then be `start`ed.
        Parameters are similar to those for the `docker run` command
        except it doesn't support the attach options (`-a`). See "Port bindings" and "Using volumes"  for
        more information on how to create port bindings and volume mappings.
        """

        #self.command="exec >/dev/tty 2>/dev/tty </dev/tty && /usr/bin/screen -s /bin/bash"

        self.c_tag = image_tag
        self.container_id = ""
        self.info_array = ["Hostname", "NetworkSettings"]
        self.brick_ext = 0
        self.gflag=gl_flag
        self.brick_set=[]
        logger.info(" Create and start containers with image :%s ", self.c_tag)

        if self.gflag:
            bricks =  gluster_dict['BRICKS']
            self.brick_set = []
            for b in bricks.split(','):
                self.brick_set.append(b)

            logger.info("Bricks will be using in order:%s", self.brick_set)

        logging.info( "Enable Gluster Volume :%s" ,self.gflag)
        try:

            for num in range(0,self.dock_numc):

                if self.gflag:
                    self.brick_mount=gluster_dict['SERVER_EXPORT_DIR']
                    if len(self.brick_set ) < self.dock_numc:

                        logger.critical("Number of bricks given to me is less than number of nodes,  check configfile")
                        return False
                    else:
                        print "..."

                    self.dock_command =''
                    self.container_id = self.dc.create_container(
                                                    self.c_tag, command=self.dock_command, hostname=None, user=None,
                                                    detach=True, stdin_open=True, tty=True, mem_limit=0,
                                                    ports=[22, 80], environment=None, dns=None, volumes=[self.brick_mount],
                                                    volumes_from=None, network_disabled=False, name=None,
                                                    entrypoint=None, cpu_shares=None, working_dir=None)

                else:
                    self.dock_command ='/bin/bash'
                    self.container_id = self.dc.create_container(
                                                    self.c_tag, command=self.dock_command, hostname=None, user=None,
                                                    detach=True, stdin_open=True, tty=True, mem_limit=0,
                                                    ports=[22, 80], environment=None, dns=None, volumes=None,
                                                    volumes_from=None, network_disabled=False, name=None,
                                                    entrypoint=None, cpu_shares=None, working_dir=None)

                self.cons_ids.append(self.container_id)
            logger.debug("Container Ids : %s" ,self.cons_ids)
            if not self.cons_ids:
                logger.critical( "Failed when creating Containers")
                return False

            for ids in self.cons_ids:
                try:
                    if self.gflag:
                        self.brick_source = self.brick_set[self.brick_ext]
                        self.brick_ext += 1
                        ret = self.dc.start(
                                            ids, binds={self.brick_source:self.brick_mount}, port_bindings={22: None, 80: None},
                                            lxc_conf=None,publish_all_ports=False, links=None, privileged=True)
                    else:

                        ret = self.dc.start(ids, binds=None, port_bindings={22: None, 80: None}, lxc_conf=None,
                                            publish_all_ports=False, links=None, privileged=True)
                    logger.debug("Container with ID :%s is started", ids)
                    time.sleep(10)

                except Exception as e:
                    logger.critical("Exception raised when starting Container with id:%s", ids)

                    logger.error(e)
                    return False


            #cons =  self.dc.containers(quiet=False, all=False, trunc=True, latest=False, since=None,
            #                   before=None, limit=-1)
            #for c in cons:
            #   self.c_id =  dict(cons[0])['Id']
            #   self.cons_ids.append( self.c_id)

            logger.info("  Information about running containers ")

            for ids in self.cons_ids:
                try:
                    insp_obj = self.dc.inspect_container(ids)
                    hostname =insp_obj['Config']['Hostname']
                    ipaddr = insp_obj['NetworkSettings']['IPAddress']
                    if not ipaddr :
                        logger.critical("Not able to get IP address of %s", hostname)

                    self.container_ips.append(insp_obj['NetworkSettings']['IPAddress'])
                except Exception as e:
                    logger.critical('Exception raised when inspecting Containers')
                    logger.debug(e)

                    return False

        except Exception as e:

            logger.critical('Exception raised when creating/starting Containers')
            logger.debug(e)
            return False

        return True
Example #13
    def runC(
        self,
        image_tag,
        gl_flag,
        gluster_dict={},
    ):
        """
        Run the container
        Creates a container that can then be `start`ed.
        Parameters are similar to those for the `docker run` command
        except it doesn't support the attach options (`-a`). See "Port bindings" and "Using volumes"  for
        more information on how to create port bindings and volume mappings.
        """

        #self.command="exec >/dev/tty 2>/dev/tty </dev/tty && /usr/bin/screen -s /bin/bash"

        self.c_tag = image_tag
        self.container_id = ""
        self.info_array = ["Hostname", "NetworkSettings"]
        self.brick_ext = 0
        self.gflag = gl_flag
        self.brick_set = []
        logger.info(" Create and start containers with image :%s ", self.c_tag)

        if self.gflag:
            bricks = gluster_dict['BRICKS']
            self.brick_set = []
            for b in bricks.split(','):
                self.brick_set.append(b)

            logger.info("Bricks will be using in order:%s", self.brick_set)

        logging.info("Enable Gluster Volume :%s", self.gflag)
        try:

            for num in range(0, self.dock_numc):

                if self.gflag:
                    self.brick_mount = gluster_dict['SERVER_EXPORT_DIR']
                    if len(self.brick_set) < self.dock_numc:

                        logger.critical(
                            "Number of bricks given to me is less than number of nodes,  check configfile"
                        )
                        return False
                    else:
                        print "..."

                    self.dock_command = ''
                    self.container_id = self.dc.create_container(
                        self.c_tag,
                        command=self.dock_command,
                        hostname=None,
                        user=None,
                        detach=True,
                        stdin_open=True,
                        tty=True,
                        mem_limit=0,
                        ports=[22, 80],
                        environment=None,
                        dns=None,
                        volumes=[self.brick_mount],
                        volumes_from=None,
                        network_disabled=False,
                        name=None,
                        entrypoint=None,
                        cpu_shares=None,
                        working_dir=None)

                else:
                    self.dock_command = '/bin/bash'
                    self.container_id = self.dc.create_container(
                        self.c_tag,
                        command=self.dock_command,
                        hostname=None,
                        user=None,
                        detach=True,
                        stdin_open=True,
                        tty=True,
                        mem_limit=0,
                        ports=[22, 80],
                        environment=None,
                        dns=None,
                        volumes=None,
                        volumes_from=None,
                        network_disabled=False,
                        name=None,
                        entrypoint=None,
                        cpu_shares=None,
                        working_dir=None)

                self.cons_ids.append(self.container_id)
            logger.debug("Container Ids : %s", self.cons_ids)
            if not self.cons_ids:
                logger.critical("Failed when creating Containers")
                return False

            for ids in self.cons_ids:
                try:
                    if self.gflag:
                        self.brick_source = self.brick_set[self.brick_ext]
                        self.brick_ext += 1
                        ret = self.dc.start(
                            ids,
                            binds={self.brick_source: self.brick_mount},
                            port_bindings={
                                22: None,
                                80: None
                            },
                            lxc_conf=None,
                            publish_all_ports=False,
                            links=None,
                            privileged=True)
                    else:

                        ret = self.dc.start(ids,
                                            binds=None,
                                            port_bindings={
                                                22: None,
                                                80: None
                                            },
                                            lxc_conf=None,
                                            publish_all_ports=False,
                                            links=None,
                                            privileged=True)
                    logger.debug("Container with ID :%s is started", ids)
                    time.sleep(10)

                except Exception as e:
                    logger.critical(
                        "Exception raised when starting Container with id:%s",
                        ids)

                    logger.error(e)
                    return False

            #cons =  self.dc.containers(quiet=False, all=False, trunc=True, latest=False, since=None,
            #                   before=None, limit=-1)
            #for c in cons:
            #   self.c_id =  dict(cons[0])['Id']
            #   self.cons_ids.append( self.c_id)

            logger.info("  Information about running containers ")

            for ids in self.cons_ids:
                try:
                    insp_obj = self.dc.inspect_container(ids)
                    hostname = insp_obj['Config']['Hostname']
                    ipaddr = insp_obj['NetworkSettings']['IPAddress']
                    if not ipaddr:
                        logger.critical("Not able to get IP address of %s",
                                        hostname)

                    self.container_ips.append(
                        insp_obj['NetworkSettings']['IPAddress'])
                except Exception as e:
                    logger.critical(
                        'Exception raised when inspecting Containers')
                    logger.debug(e)

                    return False

        except Exception as e:

            logger.critical(
                'Exception raised when creating/starting Containers')
            logger.debug(e)
            return False

        return True
Example #14
def main(dryr=0, dockit_log=dockit_log_file):

    parser = OptionParser()
    parser.add_option("-d", "--dry_run",
                      action="store_true", dest="dry", default=False,
                      help="Do dry run - dont try to install any packages")

    parser.add_option("-p", "--pullimage",
                      action="store_true", dest="pullimg", default=False,
                      help="Whether to pull from the docker repo ? Need to specify dockerrepo and image name ")


    parser.add_option("-s", "--startc",
                      action="store_true", dest="startc", default=False,
                      help="Whether to start from an image ? Need to specify image and tag ")

    parser.add_option("-b", "--buildimage",
                      action="store_true", dest="buildimg", default=False,
                      help="Whether to build image from the dockerfile? Need to specify dockerfile path, and imagetag")

    parser.add_option("-g", "--gluster_mode",
                      action="store_true", dest="glumode", default=False,
                      help="Configure gluster volume in containers")

    parser.add_option("-i", "--image",
                      dest="image", help="Image name  - Containers will be based on this image", metavar="IMAGE")

    parser.add_option("-t", "--imgtag",
                      dest="imgtag", help="Image tag name  - Containers will be assigned this tag", metavar="IMAGETAG")

    parser.add_option("-n", "--count",
                      dest="count", help="Number of containers to start  - ", metavar="COUNT")

    parser.add_option("-c", "--configfile",
                      dest="configfile", help="COnfig file path to read gluster configuration  - ", metavar="CONFIGFILE")

    parser.add_option("-f", "--dockerfile",
                      dest="dockerfile", help="Docker file path to build the container  - ", metavar="DOCKERFILE")

    parser.add_option("-r", "--dockerrepo",
                      dest="dockerrepo", help="Docker repository name with a trailing blackslash  - ", metavar="DOCKERREPO")

    parser.add_option("--gv", "--glustervolume",
                      action="store_true", dest = "gluvolume", default=False,help="Gluster Volume Creation  inside containers  - Valid with -g option ")
                     # dest="gluvolume", help="Gluster Volume Creation  inside containers  - Valid with -g option ", metavar="GLUSTERVOLUME")

    parser.add_option("--gi", "--glusterinstall",
                      dest="gluinst", help="Install gluster inside containers  - Valid with -g option ", metavar="GLUSTERVERSION")

    logger.info("Dockit starting.. Process logs are available at:%s", dockit_log_file)

    options, arguments = parser.parse_args()
    globalopts=dict(options.__dict__)

    pull_option_args = ['image','dockerrepo']
    #pull_option_args = ['image']
    build_option_args = ['dockerfile','imgtag']
    start_option_args = ['image', 'imgtag', 'count']
    gluster_options_args = ['gluvolume', 'gluinst']

    anyopt = [ options.pullimg , options.buildimg , options.startc , options.dry]
    anyopt_dict = { 'pullimg':pull_option_args , 'buildimg':build_option_args , 'startc':start_option_args }

    check = [o for o in anyopt if o]
    if not check:
        logging.error( "You missed one of the must required option..  reread and execute.... exiting .")
        print_menu()
        sys.exit(1)
    if options.gluinst or options.gluvolume:
        if not options.glumode:
            logger.error("You can not use gluster actions without -g option")
            sys.exit(1)
    if options.glumode and not options.gluvolume and not options.gluinst:
        logger.warn("-g dont have any effect without --gv or --gi options")

    final_true_list = [[key,value] for key,value in globalopts.items() if value != False if value != None]
    logger.debug("Input \t :%s" ,final_true_list)
    final_list=[]
    for it in final_true_list:
        for k,v in anyopt_dict.items():
            if k == it[0]:
                final_list.append(v)
    #print final_list
    my_good = list(set([item for sublist in final_list for item in sublist]))

    if options.startc and options.buildimg:
        my_good.remove('image')
        logger.debug("Required Parameters for your request:%s", my_good)

    if options.pullimg and options.startc:
        if options.imgtag is None:
            options.imgtag = 'latest'
            logger.debug("image tag : %s , docker repo:%s", options.imgtag, options.dockerrepo)

    if options.pullimg and options.buildimg:
        logger.error( "Only one at a time, pull or build")
        sys.exit(1)

    for good in my_good:
        if not options.__dict__[good]:
                logger.error("\n \t Unfortunately  You Missed:%s", good)
                parser.print_help()
                sys.exit(1)


    if options.count:
        options.count=int(options.count)

    if options.startc:

        prefer =  raw_input ("Do you want to continue (y/n)")
        if prefer=='y':
            logger.info( "Proceeding ")
            if options.glumode:


                if options.gluinst:
                    logger.info( "Need to install gluster inside containers")
                    gluster_config['GLUSTER_VERSION'] = options.gluinst

                if options.gluvolume:
                    logger.info( "\n Need to configure gluster volume..\n")

                    g_voltype=''
                    if not options.configfile:
                        g_voltype = raw_input("Gluster Volume Type (ex: 2x2x1 where (distribute,replica, stripe count in order)\t :")
                        g_volname = raw_input("Gluster Volume Name (ex: glustervol)\t :")
                        g_export  = raw_input("Gluster Export Dir Name (ex: /rhs_bricks)\t :")
                        g_brick_file = raw_input("Gluster brick file (ex: /home/configfile)\t :")
                    else:
                        logger.info( "Reading gluster configuration from config file")
                        print read_config_file(options.configfile)

                    try:
                        if g_voltype:
                            volumeconfig = re.search(r'([0-9]+)x([0-9]+)x([0-9]+)', g_voltype)
                        else:
                            gluster_config['VOL_TYPE'] = gluster_config.get('VOL_TYPE', '1x2x1')
                            gluster_config['VOLNAME']=gluster_config.get('VOLNAME', 'defaultVol')
                            gluster_config['SERVER_EXPORT_DIR']=gluster_config.get('SERVER_EXPORT_DIR','/defaultExport')
                            volumeconfig = re.search(r'([0-9]+)x([0-9]+)x([0-9]+)',gluster_config['VOL_TYPE'])
                        distributecount = volumeconfig.group(1)
                        replicacount = volumeconfig.group(2)
                        stripevcount = volumeconfig.group(3)
                    except Exception as e:
                        logger.debug( "Error in parsing volume type string..exiting")
                        logger.debug(e)
                        sys.exit(1)

                    if distributecount == '0':
                        distributecount =  1
                    if replicacount == '0':
                        replicacount =  1
                    if stripevcount == '0':
                        stripevcount =   1

                    options.count = int(distributecount) * int(replicacount) * int(stripevcount)
                    logger.info( "No of gluster containers to spawn:%s" , options.count)
                    prefer = raw_input ("Do you want to continue (y/n):")
                    if prefer == 'y':
                        if not options.configfile:
                            gluster_config['VOLNAME']=g_volname
                            gluster_config['VOL_TYPE']=g_voltype
                            gluster_config['SERVER_EXPORT_DIR']=g_export
                            gluster_config['BRICK_FILE']=g_brick_file
                            #gluster_config['BRICKS'] = read_config_file_b(g_brick_file)
                            read_config_file_b(g_brick_file)
                        else:
                            logger.info( "Configuration read from configuration file")

                        logger.info("%s", gluster_config)
                    else:
                        logger.error( "Exiting.. Invoke dockit command with proper option of gluster mode")
                        sys.exit(1)
            else:
                logger.info( "Run containers natively, no mode configured")
            prefer=''
        else:
            logger.debug( "Exiting ")
            sys.exit(1)

    if options.dry:
        logger.info("Dry run : Dockit will not attempt to install any package")
        dryr= 1
    else:
        logger.debug("Install packages if required, this is not a dry run...")

    try:
        sysobj = Packageinst(dryr)
        if sysobj:
            sysobj.getsysinfo()
            ret = sysobj.checkprereq()

            if ret:
                logger.info("Success:Pre-requisites are installed")
            else:
                logger.debug("Either install it or let me install ")
                sys.exit(1)
            logger.debug("Going to check/start docker daemon")
            procd = Procstart(process="docker")
            checkret = procd.checkproc()
            if not checkret:
                ret = procd.execproc()
                if ret:
                    logger.info("Successfully started docker deamon... ")
                else:
                    logger.error('Exiting')
                    sys.exit(1)
            procd.infoproc()
            logger.debug("Connecting to the docker deamon")

            talktoDocker(options.pullimg, options.image, options.imgtag, options.count, options.dockerfile,
                         options.dockerrepo , options.buildimg, options.startc, options.glumode, options.gluinst, options.gluvolume)

    except Exception as e:
        logger.debug(e)
        sys.exit(1)
Example #15
    def gluster_install(self, version):
        failed_package_nodes = []
        failed_install_nodes = []

        nodes = run_helper.get_nodes_ip()
        logger.info("Trying to install gluster on %s nodes ", nodes)

        gluster_package_command = (
            'yum -y install python-devel python-setuptools deltarpm yum-utils '
            'gcc git autoconf automake bison dos2unix flex glib2-devel '
            'libaio-devel libattr-devel libibverbs-devel '
            'librdmacm-devel libtool libxml2-devel make openssl-devel '
            'pkgconfig python-devel python-eventlet python-netifaces '
            'python-paste-deploy python-simplejson python-sphinx '
            'python-webob pyxattr readline-devel rpm-build gdb dbench '
            'net-tools systemtap-sdt-devel attr psmisc findutils which '
            'xfsprogs yajl-devel lvm2-devel e2fsprogs mock nfs-utils '
            'openssh-server supervisor openssl fuse-libs wget >/dev/null')
        # gluster_package_command='ls'
        # gluster_install_command = 'cd /root/glusterfs && make install'
        gluster_install_command = (
            "rm -rf /root/glusterfs && cd /root && git clone "
            "git://review.gluster.org/glusterfs && cd glusterfs "
            "&& git checkout -b {0} origin/release-{1}  && "
            "./autogen.sh >/dev/null && ./configure>/dev/null "
            "&& make >/dev/null && make install> /dev/null ".format(
                version, version))

        for node in nodes:
            status1 = status2 = 0
            logger.info("Configuring/installing on node:%s", node)
            status1 = run_helper.run_command(node, gluster_package_command,
                                             True)

            if status1:
                logger.error(
                    'Required Gluster package installation '
                    'failed on node: %s', node)
                failed_package_nodes.append(node)
            else:
                logger.info("Continuing ..")
                status2 = run_helper.run_command(node, gluster_install_command,
                                                 True)
                time.sleep(20)
                if status2:
                    logger.error("Failed to configure GlusterFs from "
                                 "source repository ")
                    failed_install_nodes.append(node)
                else:
                    logger.info(
                        "Successfully configured GlusterFS "
                        "binary on node:%s", node)

        if failed_package_nodes or failed_install_nodes:
            logger.critical(
                "Failed to install gluster packages on:%s or GlusterFS "
                "binary installation failed on:%s", failed_package_nodes,
                failed_install_nodes)
        else:
            logger.info(
                "Successful Gluster Package Installation and GlusterFS "
                "Binary installation on all the nodes!")
        return
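All of the gluster examples call run_helper.run_command(node, cmd, flag) and treat a non-zero return value as failure. The helper itself is not included in these snippets; the following is only an assumed sketch of what such a function might look like (running over SSH as root is an assumption, not something the source shows).

# Assumed sketch only: run_helper is the project's own module and is not shown
# in these examples; this mimics the observed call signature and return value.
import logging
import subprocess

logger = logging.getLogger(__name__)

def run_command(node, cmd, verbose=True):
    """Run 'cmd' on 'node' over ssh and return its exit status (0 == success)."""
    ssh_cmd = ["ssh", "-o", "StrictHostKeyChecking=no", "root@%s" % node, cmd]
    proc = subprocess.Popen(ssh_cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    out, _ = proc.communicate()
    if verbose and out:
        logger.debug(out)
    return proc.returncode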
Beispiel #16
0
    def create_gluster_volume(self, start=True):

        nodes = run_helper.get_nodes_ip()
        logger.info("nodes are %s", nodes)

        masternode = nodes[0]
        export_dir = run_helper.get_server_export_dir()
        if export_dir is None:
            export_dir = '/rhs_bricks'
        vol_type = run_helper.get_volume_type()
        if vol_type is not None:
            volconfig = re.search(r'([0-9]+)x([0-9]+)x([0-9]+)', vol_type)
            distcount = volconfig.group(1)
            repcount = volconfig.group(2)
            stripecount = volconfig.group(3)
        else:
            distcount = '2'
            repcount = '2'
            stripecount = '1'

        trans_type = run_helper.get_trans_type()
        if trans_type == '':
            trans_type = 'tcp'

        volname = run_helper.get_vol_name()
        if volname == '':
            volname = 'hosdu'

        number_nodes = len(nodes)
        logger.info("Number of nodes: %s", number_nodes)
        if distcount == '0':
            distcount = 1
        if repcount == '0':
            repcount = 1
        if stripecount == '0':
            stripecount = 1

        number_bricks = int(distcount) * int(repcount) * int(stripecount)

        logger.info("number of bricks:%s", number_bricks)

        if number_bricks > number_nodes:
            logger.critical(
                "The number of bricks exceeds the number of servers.")
            logger.critical("Support for more than one brick per "
                            "container is not available yet")
            return 1

        if repcount == '1':
            replica_count = ''
        else:
            replica_count = "replica %s" % repcount

        if stripecount == '1':
            stripe_count = ''
        else:
            stripe_count = "stripe %s" % stripecount
        # pre_create_cleanup(nodes, export_dir)

        brick_list = []
        node_index = 0
        for i in range(0, number_bricks):
            brick = "%s:%s/%s_brick%d" % (nodes[node_index], export_dir,
                                          volname, i)
            brick_list.append(brick)
            node_index = node_index + 1
            # Wrap around once every node has received a brick.
            if node_index >= number_nodes:
                node_index = 0

        vol_create_cmd = ("gluster --mode=script volume create %s %s %s "
                          "transport %s %s force" %
                          (volname, replica_count, stripe_count, trans_type,
                           ' '.join(brick_list)))

        flag = 0
        for node in nodes:
            status = run_helper.run_command(node, 'pgrep glusterd || glusterd',
                                            True)
            if status:
                logger.error('glusterd can not be started in node: %s', node)
                flag = 1

        if flag:
            logger.info(
                'glusterd can not be started successfully in all nodes. '
                'Exiting...')
            sys.exit(1)

        flag = 0
        for node in nodes:
            if node != masternode:
                status = run_helper.run_command(masternode,
                                                'gluster peer probe ' + node,
                                                False)
                time.sleep(20)
                if status:
                    logger.error('peer probe went wrong in %s', node)
                    flag = 1

        if flag:
            logger.critical(
                'Peer probe went wrong in some machines. Exiting...')
            sys.exit(1)

        status = run_helper.run_command(masternode, vol_create_cmd, True)
        if status:
            logger.critical('volume creation failed.')

        if status == 0 and start:
            status = run_helper.run_command(
                masternode, "gluster --mode=script volume start %s" % volname,
                False)

        return status
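To make the brick layout concrete, here is an illustrative trace of the loop above for the default 2x2x1 configuration (four bricks) on four hypothetical node IPs; the addresses are examples only and the whitespace in the command is approximate.

# Illustrative values only: node IPs are hypothetical, volume name and export
# directory are the defaults from the code above.
# nodes         = ['10.70.0.1', '10.70.0.2', '10.70.0.3', '10.70.0.4']
# number_bricks = 2 * 2 * 1 = 4, so one brick lands on each node:
# brick_list    = ['10.70.0.1:/rhs_bricks/hosdu_brick0',
#                  '10.70.0.2:/rhs_bricks/hosdu_brick1',
#                  '10.70.0.3:/rhs_bricks/hosdu_brick2',
#                  '10.70.0.4:/rhs_bricks/hosdu_brick3']
# vol_create_cmd = ("gluster --mode=script volume create hosdu replica 2 "
#                   "transport tcp 10.70.0.1:/rhs_bricks/hosdu_brick0 "
#                   "10.70.0.2:/rhs_bricks/hosdu_brick1 "
#                   "10.70.0.3:/rhs_bricks/hosdu_brick2 "
#                   "10.70.0.4:/rhs_bricks/hosdu_brick3 force")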
Beispiel #17
0
                nodes.append(client_ip)


    if remoterun:
        for node in nodes:
            run_command(node, cmd, True)

    if threaded_run:
        threads = []
        for node in nodes:
            t = threading.Thread(target=run_command, args=(node, cmd, True))
            t.start()
            threads.append(t)
        # Wait for every per-node command to finish before returning.
        for t in threads:
            t.join()

    if scpsend:
        for node in nodes:
            rcopy(node, sfile, destpath, True)

    if not scpsend and not remoterun and not threaded_run:
        logger.error('No valid action selected. Please execute with a proper option')
        usage()
        sys.exit(1)

    return 0


#read_config_file()
if __name__ == '__main__':
    main()
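The scpsend branch above relies on an rcopy helper that is not included in the fragment. The sketch below is only an assumed illustration that mirrors the observed call rcopy(node, sfile, destpath, True); everything beyond that call shape (scp, root login, return value) is an assumption.

# Assumed sketch only: the real rcopy helper is not part of these examples.
import subprocess

def rcopy(node, sfile, destpath, verbose=True):
    """Copy 'sfile' to 'destpath' on 'node' via scp; return the exit status."""
    scp_cmd = ["scp", "-o", "StrictHostKeyChecking=no", sfile,
               "root@%s:%s" % (node, destpath)]
    status = subprocess.call(scp_cmd)
    if status and verbose:
        print("Failed to copy %s to %s:%s" % (sfile, node, destpath))
    return status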
Beispiel #18
0
                else:
                    logger.info("%s -> not installed" % (pck))
                    mis_pcks.append(pck)
                    if not self.skipflag:
                        try:
                            if pck == "python-docker-py":
                                logger.debug("Trying with pip")
                                cmd = ("sudo pip install {0} -U >"
                                       "/dev/null".format("docker-py"))
                                # Drop it from the missing list only if the
                                # pip install actually succeeded.
                                if os.system(cmd) == 0:
                                    mis_pcks.remove(pck)
                            else:
                                logger.info("Unknown package to install "
                                            "via pip.. Proceeding")
                        except Exception as e:
                            logger.error(e)
                            logger.error(
                                "Error occurred when trying to install %s "
                                "using pip -> Try to install manually", pck)
                            sys.exit(1)
            if len(mis_pcks) > 0:
                if self.skipflag:
                    logger.info("Please install the %s packages and "
                                "try again.", mis_pcks)

                    sys.exit(1)
                else:
                    try:

                        for pkgs in mis_pcks:
                            os_cmd = ("sudo apt-get install -y %s && "
Beispiel #19
0
                            logger.critical(e)
                            logger.error("Error occurred when trying to install %s -> Try to install manually" % (pck))
                            sys.exit(1)
            if len(mis_pcks) > 0:
                if self.skipflag:
                    logger.info("Please install the %s packages and try again.", mis_pcks)

                    sys.exit(1)
                else:
                    try:
                        yb.resolveDeps()
                        yb.buildTransaction()
                        yb.processTransaction()
                        return True
                    except Exception as e:
                        logger.error(
                            "Yum transaction failure:%s .. Giving one more try", e)
                        for pkgs in mis_pcks:
                            os_cmd = "yum install -y %s >/dev/null" %(pkgs)
                            if os.system(os_cmd):
                                print "Failed again to install %s package" % (pkgs)
                                sys.exit(1)


        except Exception as e:
            logger.critical("Exiting..%s", e)

            sys.exit(1)
        return True


class Procstart:
Beispiel #20
0
    def create_gluster_volume(self, start=True):

        nodes = run_helper.get_nodes_ip()
        logger.info( "nodes are %s" , nodes)

        masternode = nodes[0]
        export_dir = run_helper.get_server_export_dir()
        if export_dir is None:
            export_dir = '/rhs_bricks'
        vol_type = run_helper.get_volume_type()
        if vol_type is not None:
            volconfig = re.search(r'([0-9]+)x([0-9]+)x([0-9]+)', vol_type)
            distcount = volconfig.group(1)
            repcount = volconfig.group(2)
            stripecount = volconfig.group(3)
        else:
            distcount = '2'
            repcount = '2'
            stripecount = '1'

        trans_type = run_helper.get_trans_type()
        if trans_type == '':
            trans_type = 'tcp'

        volname = run_helper.get_vol_name()
        if volname == '':
            volname = 'hosdu'

        number_nodes = len(nodes)
        logger.info( "Number of nodes: %s" ,number_nodes)
        if distcount == '0':
            distcount =  1
        if repcount == '0':
            repcount =  1
        if stripecount == '0':
            stripecount =  1

        number_bricks = int(distcount) * int(repcount) * int(stripecount)

        logger.info( "number of bricks:%s" , number_bricks)

        if number_bricks > number_nodes:
            logger.critical("number of bricks and number of servers don't match.\n")
            logger.critical("The support to have more than 1 brick per container is not there yet \n")
            return 1

        if repcount == '1':
            replica_count = ''
        else:
            replica_count = "replica %s" % repcount

        if stripecount == '1':
            stripe_count = ''
        else:
            stripe_count = "stripe %s" % stripecount
        # pre_create_cleanup(nodes, export_dir)

        brick_list = []
        node_index = 0
        for i in range(0, number_bricks):
            brick = "%s:%s/%s_brick%d" % (nodes[node_index], export_dir, volname, i)
            brick_list.append(brick)
            node_index = node_index + 1
            # Wrap around once every node has received a brick.
            if node_index >= number_nodes:
                node_index = 0

        vol_create_cmd = "gluster --mode=script volume create %s %s %s transport %s %s force" % (volname, replica_count, stripe_count, trans_type, ' '.join(brick_list))

        flag = 0
        for node in nodes:
            status = run_helper.run_command(node, 'pgrep glusterd || glusterd', True)
            if status:
                logger.error('glusterd can not be started in node: %s', node)
                flag = 1

        if flag:
            logger.info('glusterd can not be started successfully in all nodes. Exiting...')
            sys.exit(1)

        flag = 0
        for node in nodes:
            if node != masternode:
                status = run_helper.run_command(masternode, 'gluster peer probe ' + node, False)
                time.sleep(20)
                if status:
                    logger.error('peer probe went wrong in %s', node)
                    flag = 1

        if flag:
            logger.critical('Peer probe went wrong in some machines. Exiting...')
            sys.exit(1)

        status = run_helper.run_command(masternode, vol_create_cmd, True)
        if status:
            logger.critical('volume creation failed.')

        if status == 0 and start:
            status = run_helper.run_command(masternode, "gluster --mode=script volume start %s" % volname, False)

        return status