Example #1
def run_command(node, cmd, verbose):
        # quiet paramiko's own logging
        logging.getLogger("paramiko").setLevel(logging.WARNING)
        ssh_handle = paramiko.SSHClient()
        ssh_handle.load_system_host_keys()
        ssh_handle.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        #todo : may need to look at user name as well
        ssh_handle.connect(node, username='******', password=con_pass)

        chan = ssh_handle.get_transport().open_session()


        try:
            chan.exec_command(cmd)
        except Exception:
            logger.critical("unable to execute the command %s on the remote server %s", cmd, node)

        fout = chan.makefile('rb')
        ferr = chan.makefile_stderr('rb')
        ret_code = chan.recv_exit_status()
        if verbose:
            logger.debug("node: %s -> cmd: %s -> exit status: %d", node, cmd, ret_code)
            print '\n' + fout.read() + ferr.read()


        ssh_handle.close()

        return ret_code
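run_command() depends on module-level state: a configured `logger`, the `paramiko` import, and the `con_pass` global holding the SSH password. A minimal usage sketch under those assumptions (the host and command below are placeholders):

import getpass
import logging

import paramiko

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
con_pass = getpass.getpass('SSH password: ')

# run one command on one node and report its exit status
rc = run_command('192.168.122.10', 'uname -r', True)
print 'exit status:', rc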
Example #2
def get_mountpoint():
    try:
        mountpoint = rh_config_dict['MOUNTPOINT']
    except KeyError:
        logger.critical(
            'Unable to find the mount point. '
            'Please set the MOUNTPOINT in configfile')
        sys.exit(1)

    invalid_mountpoints = [
        '/', '//', '/root', '/root/', '/usr',
        '/usr/', '/etc', '/etc/', '/sbin', '/sbin/',
        '/boot', '/boot/', '/opt', '/opt/', '/var',
        '/var/', '/bin', '/bin/']
    if mountpoint in invalid_mountpoints:
        logger.critical(
            "%s is not a valid mountpoint. "
            "Please provide a valid mountpoint. "
            "Aborting...", mountpoint)
        sys.exit(1)

    if mountpoint[-1] == '/':
        mountpoint = mountpoint[:-1]

    return mountpoint
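get_mountpoint() reads the module-level rh_config_dict; a quick sketch of the entry it expects (the path is illustrative):

rh_config_dict = {'MOUNTPOINT': '/mnt/glusterfs/'}
print get_mountpoint()   # -> '/mnt/glusterfs', the trailing slash is stripped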
Example #3
    def pullC(self):
        """
         This pull input image
        """

        logger.debug(self.dock_pull)
        if self.dock_pull:
            logger.info(
                "Trying to pull %s from docker repo:%s ... \n "
                "\t \t \tThis can take some time, please wait...",
                self.dock_image, self.repository)
            if self.repository:
                self.dock_image = self.repository + '/' + self.dock_image
            logger.debug("Repo+image:%s", self.dock_image)
            try:
                ret = self.dc.pull(self.dock_image)
                #logger.debug("%s", ret)
                if "404" in ret:
                    logger.error("Failed to pull %s from provided docker repo",
                                 self.dock_image)
                    return False
                else:
                    logger.info("Successfully pulled docker image:%s" %
                                (self.dock_image))
                    return True

            except Exception as e:
                logger.critical("Failed to pull %s with an exception: %s",
                                self.dock_image, e)
                return False

        else:
            logger.debug("Dont attempt to pull given image from docker repo")
            return True
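pullC() targets the pre-2.0 docker-py API, where Client.pull() without stream=True returned the server's progress output as one string, which is why a plain "404" substring check signals a failed pull. A rough standalone equivalent under that assumption:

import docker  # docker-py < 2.0

dc = docker.Client(base_url='unix://var/run/docker.sock')
out = dc.pull('fedora')  # old API: returns concatenated JSON progress lines
if '404' in out:
    print 'pull failed'
else:
    print 'pull succeeded'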
Example #4
File: dockit.py Project: yekeqiang/dockit
    def pullC(self):
        """
         This pull input image
        """

        logger.debug(self.dock_pull)
        if self.dock_pull:
            logger.info("Trying to pull %s from docker repo:%s ... \n "
                         "\t \t \tThis can take some time, please wait...", self.dock_image, self.repository)
            if self.repository:
                self.dock_image=self.repository+'/'+self.dock_image
            logger.debug("Repo+image:%s", self.dock_image)
            try:
                ret = self.dc.pull(self.dock_image)
                #logger.debug("%s", ret)
                if "404" in ret:
                    logger.error("Failed to pull %s from provided docker repo", self.dock_image)
                    return False
                else:
                    logger.info("Successfully pulled docker image:%s" % (self.dock_image))
                    return True

            except Exception as e:
                logger.critical("Failed to pull %s with an exception: %s", self.dock_image, e)
                return False

        else:
            logger.debug("Dont attempt to pull given image from docker repo")
            return True
Example #5
def get_mount_type():
    try:
        mount_type = rh_config_dict['MOUNT_TYPE']
    except KeyError:
        logger.critical('Unable to find a valid mount type. Please set MOUNT_TYPE in configfile')
        sys.exit(1)

    return mount_type
Example #6
File: install.py Project: yekeqiang/dockit
    def checkprereq(self):
        """
			This function will check for pre-req packages
			which need to be installed and if its not available
			it will be installed
		"""
        try:
            if sysdict['dist'] == "fedora":
                req_pcks = list(fedora_req_pcks)
            elif sysdict['dist'] == "redhat":

                if sysdict['ver'] < '7':
                    req_pcks = list(rhel_req_pcks)
                else:
                    req_pcks = list(rhel7_req_pcks)
            elif sysdict['dist'] == "centos":
                req_pcks = list(centos_req_pcks)
            else:
                logger.error("Unknown Distribution for me")
                sys.exit(1)

            logger.info("Distribution:%s Required %s packages \n\t \t \t Making yum transactions", sysdict['dist'], req_pcks)
            yb = yum.YumBase()
            yb.conf.cache = os.geteuid() != 0  # rely on the yum cache only when not running as root
            for pck in req_pcks:
                if yb.rpmdb.searchNevra(name=pck):
                    logger.info("%s -> Installed" % (pck))
                    avail_pcks.append(pck)
                else:
                    logger.info("%s -> not installed" % (pck))
                    mis_pcks.append(pck)
                    if not self.skipflag:
                        try:
                            if pck == "python-docker-py":
                                logger.debug("Trying with pip")
                                cmd = "sudo pip install {0} -U >/dev/null".format("docker-py")
                                os.system(cmd)
                                mis_pcks.remove(pck)
                            else:
                                logger.info("Unknown package for me to install via pip.. Proceeding")
                        except Exception as e:
                            logger.error(e)
                            logger.error("Error occurred when trying to install %s  using pip -> Try to install manually" % (pck))
                            sys.exit(1)
                        try:
                            yb.install(name=pck)
                            time.sleep(5)
                        except yum.Errors.InstallError as err:
                            logger.error("exiting : Error when installing package %s", pck)
                            logger.error("%s", (str(err)))
                            sys.exit(1)
                        except Exception as e:
                            logger.critical(e)
                            logger.error("Error occurred when trying to install %s -> Try to install manually" % (pck))
                            sys.exit(1)
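One caveat with the yum API used above: yum.YumBase.install() only queues the package in the transaction set, so nothing is actually installed until the transaction is resolved and processed. A minimal sketch of the full sequence (assuming the Python 2 yum bindings and root privileges):

import os
import yum

yb = yum.YumBase()
yb.conf.cache = os.geteuid() != 0      # only rely on the cache when not root
if yb.rpmdb.searchNevra(name='git'):
    print 'git -> Installed'
else:
    yb.install(name='git')             # queue the package
    yb.resolveDeps()                   # resolve dependencies
    yb.processTransaction()            # download and install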
Example #7
    def gluster_install(self, version):
        failed_package_nodes = []
        failed_install_nodes = []


        nodes = run_helper.get_nodes_ip()
        logger.info("Trying to install gluster on %s nodes ", nodes)


        gluster_package_command = 'yum -y install python-devel python-setuptools gcc deltarpm yum-utils git \
                   autoconf automake bison dos2unix flex glib2-devel \
                   libaio-devel libattr-devel libibverbs-devel \
                   librdmacm-devel libtool libxml2-devel make openssl-devel \
                   pkgconfig python-devel python-eventlet python-netifaces \
                   python-paste-deploy python-simplejson python-sphinx \
                   python-webob pyxattr readline-devel rpm-build gdb dbench \
                   net-tools systemtap-sdt-devel attr psmisc findutils which \
                   xfsprogs yajl-devel lvm2-devel e2fsprogs mock nfs-utils \
                   openssh-server supervisor openssl fuse-libs wget >/dev/null'
        #gluster_package_command='ls'
        #gluster_install_command = 'cd /root/glusterfs && make install'
        gluster_install_command = "rm -rf /root/glusterfs && cd /root && git clone git://review.gluster.org/glusterfs && cd glusterfs && \
                                 git checkout -b %s origin/release-%s  && ./autogen.sh >/dev/null && ./configure>/dev/null && make >/dev/null && make install> /dev/null " %(version, version)


        for node in nodes:
            status1 = status2 = 0
            logger.info("Configuring/installing on node:%s", node)
            status1 = run_helper.run_command(node, gluster_package_command, True)
            #time.sleep(20)

            if status1:
                logger.error('Required Gluster package installation failed on node: %s', node)
                failed_package_nodes.append(node)
            else:
                logger.info("Continuing ..")
                status2 = run_helper.run_command(node, gluster_install_command, True)
                time.sleep(20)
                if status2:
                    logger.error("Failed to configure GlusterFS from source repository")
                    failed_install_nodes.append(node)

                else:
                    logger.info("Successfully configured GlusterFS binary on node:%s", node)

        if failed_package_nodes or failed_install_nodes:
            logger.critical("Failed to install gluster packages on:%s or GlusterFs binary installation failed on :%s ", failed_package_nodes, failed_install_nodes)
        else:
            logger.info("Successful Gluster Package Installation and GlusterFS Binary installation on all the nodes!")
       # if status1 == 0 and status2 == 0:
       #         logger.info("Everything went fine")
        return
Example #8
def get_client_ip():
    try:
        clients = rh_config_dict['CLIENT_IP_ADDRS']
    except KeyError:
        logger.critical('Unable to find the client IP addresses. Please set CLIENT_IP_ADDRS in configfile')
        sys.exit(1)

    clients_set = set([])
    for client in clients.split(','):
        clients_set.add(client)

    return list(clients_set)
Example #9
def get_trans_type():
    try:
        trans_type = rh_config_dict['TRANS_TYPE']
    except KeyError:
        trans_type = None

    supported_trans_types = ['tcp', 'rdma', 'tcp,rdma']
    if trans_type not in supported_trans_types:
        logger.critical ("%s is not a supported transport type. Please set the proper supported transport type",trans_type)
        sys.exit(1)

    return trans_type
Example #10
def get_server_export_dir():
    try:
        export_dir = rh_config_dict['SERVER_EXPORT_DIR']
    except KeyError:
        export_dir = None

    if export_dir and export_dir[-1] == '/':
        export_dir = export_dir[:-1]
    invalid_export_dir = [
        '/', '//', '/root', '/root/', '/usr', '/usr/', '/etc', '/etc/',
        '/sbin', '/sbin/', '/boot', '/boot/', '/opt', '/opt/', '/var',
        '/var/', '/bin', '/bin/']
    if export_dir in invalid_export_dir:
        logger.critical('%s can NOT be the server export directory. Please give another valid directory', export_dir)
        sys.exit(1)

    return export_dir
Example #11
def get_nodes_ip():
    logger.debug("Received below configuration from caller")
    logger.debug('%s', rh_config_dict)

    try:
        servers = rh_config_dict['SERVER_IP_ADDRS']
    except KeyError:
        logger.critical(
            'Unable to retrieve the server ip address from configfile. '
            'Please set SERVER_IP_ADDRS in configfile')
        sys.exit(1)

    server_set = set([])
    for server in servers:
        server_set.add(server)

    return list(server_set)
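All of these run_helper getters read the shared rh_config_dict; a configuration that satisfies them might look like this (every value is a placeholder, not taken from the source):

rh_config_dict = {
    'MOUNTPOINT': '/mnt/glusterfs',
    'MOUNT_TYPE': 'glusterfs',
    'CLIENT_IP_ADDRS': '10.0.0.5,10.0.0.6',
    'TRANS_TYPE': 'tcp',
    'SERVER_EXPORT_DIR': '/rhs_bricks',
    'SERVER_IP_ADDRS': ['10.0.0.2', '10.0.0.3'],
}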
Example #12
    def connectD(self):
        try:
            # note: "version" can play a role in functionality
            self.dc = docker.Client(base_url=DOCK_SOCK,
                                    version=DOCK_VERSION,
                                    timeout=30)
            if self.dc:
                dcinfo = self.dc.info()
                logger.debug("Docker information \n %s", dcinfo)
            else:
                logger.critical("Failed to get docker info")

        except Exception as e:
            logger.error(e)
            sys.exit(1)

        return self.dc
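connectD() assumes module-level DOCK_SOCK and DOCK_VERSION constants; plausible definitions for the pre-2.0 docker-py Client (the values are assumptions, not taken from the source):

DOCK_SOCK = 'unix://var/run/docker.sock'  # default local docker socket
DOCK_VERSION = '1.12'                     # remote API version string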
Example #13
File: dockit.py Project: yekeqiang/dockit
    def connectD(self):
        try:
            # note: "version" can play a role in functionality
            self.dc = docker.Client(base_url=DOCK_SOCK,
                                    version=DOCK_VERSION,
                                    timeout=30)
            if self.dc:
                dcinfo = self.dc.info()
                logger.debug("Docker information \n %s", dcinfo)
            else:
                logger.critical("Failed to get docker info")

        except Exception as e:
            logger.error(e)
            sys.exit(1)


        return self.dc
Example #14
File: dockit.py Project: yekeqiang/dockit
    def buildC(self):
        """
                        #Eqvnt to `docker build` command. Either `path` or `fileobj` needs
                        #to be set. `path` can be a local path (to a directory containing a
                        #Dockerfile) or a remote URL. `fileobj` must be a readable file-like
                        #object to a Dockerfile.

                        # Best option would be to create a docker file according to the input
                        # provided from the command line:
                        # That said, suppose if we specify fedora 2*2 it should spawn
                        # 4 containers with gluster packages installed

                        # An easy way of doing this is ship repos with glusterd running
                        # for different distros. so that that image can be mentioned in
                        # in the docker file.
        """
        #todo: Add support for github url

        self.waitsecs = 15  # number of one-minute polling attempts
        logger.debug("Timeout for build process has been set to :%s minutes", self.waitsecs)
        try:
            logger.info("Working on docker file :%s .. Please wait..", self.dock_filepath)
            if self.dock_filepath:
                logger.info("Build it with dockerfile:%s \t and Tag: %s and self.dc: %s .."
                             "need to wait .." , self.dock_filepath, self.dock_tag, self.dc)
                #ret = self.dc.build( path=self.dock_filepath, tag=self.dock_tag, quiet=False, fileobj=None, nocache=False,
                #                   rm=False, stream=False)
                ret = self.dc.build(path=self.dock_filepath, tag=self.dock_tag)

                if ret:
                    while self.waitsecs >= 0:
                        time.sleep(60)
                        logger.debug("Fetching docker images and tags for %s", self.dock_tag)

                        # More optimization can be as shown below.. how-ever disabling it for now:
                        #if next((image for image in self.dc.images() if self.dock_tag in image['RepoTags']), None):
                        #    logger.debug("Image found with tag")
                        #    return True
                        #else:
                        #    logger.debug("Failed to find requested tagged image in first attempt")
                        #    self.waitsecs =- 180

                        self.images = self.dc.images(name=None, quiet=False, all=False, viz=False)
                        for im in self.images:
                            for tag in im['RepoTags']:
                                if self.dock_tag in str(tag):
                                    logger.info("dock_tag:%s  successfully built and available in repo", self.dock_tag)
                                    #return True
                                    return im

                        self.waitsecs -= 1

                    return False

                else:
                    logger.critical("Failed to build docker image")
                    return False
            else:
                logger.critical("I am sorry, I can't build without a dockerfile")
                return False
        except Exception as e:
            logger.debug("Failed build: ...")
            logger.debug(e)
            return False
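The polling loop above walks dc.images() looking for the requested tag; Example #16 calls the same lookup as connret.image_by_tag(). A standalone sketch of what such a helper could look like (hypothetical, written against the old docker-py images() format, a list of dicts with a 'RepoTags' key):

def image_by_tag(dc, tag):
    # return the first image whose RepoTags mention `tag`, else None
    for im in dc.images():
        for t in im.get('RepoTags') or []:
            if tag in str(t):
                return im
    return None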
Example #15
File: dockit.py Project: yekeqiang/dockit
    def runC(self, image_tag, gl_flag, gluster_dict={}):
        """
        Run the container
        Creates a container that can then be `start`ed.
        Parameters are similar to those for the `docker run` command
        except it doesn't support the attach options (`-a`). See "Port bindings" and "Using volumes"  for
        more information on how to create port bindings and volume mappings.
        """

        #self.command="exec >/dev/tty 2>/dev/tty </dev/tty && /usr/bin/screen -s /bin/bash"

        self.c_tag = image_tag
        self.container_id = ""
        self.info_array = ["Hostname", "NetworkSettings"]
        #todo: For now, dont enable dock_command .
        #self.dock_command=''
        self.brick_ext = 0
        self.gflag = gl_flag
        self.brick_set = []
        logger.info(" Create and start containers with image :%s ", self.c_tag)

        if self.gflag:
            bricks = gluster_dict['BRICKS']
            self.brick_set = []
            for b in bricks.split(','):
                self.brick_set.append(b)

            logger.info("Bricks will be used in order:%s", self.brick_set)

        logger.info("Enable Gluster Volume :%s", self.gflag)
        try:
            #docker run -i -t ubuntu /bin/bash

            for num in range(0,self.dock_numc):
                #todo: If we just want to enable auto dir creation inside same directory


                # self_note: when tty=False, the containers exited as soon as
                # they were created or ran the provided command; when it is
                # True, containers stay up and running and you can attach.
                # Also set stdin_open=True for the docker attach command;
                # ctrl+P+Q can detach the container.
                # detach=False means it won't exit the shell?

                if self.gflag:
                    self.brick_mount = gluster_dict['SERVER_EXPORT_DIR']
                    if len(self.brick_set) < self.dock_numc:
                        logger.critical("Number of bricks given to me is less than the number of nodes, check configfile")
                        return False
                    else:
                        print "..."

                    self.dock_command = ''
                    self.container_id = self.dc.create_container(
                                                    self.c_tag, command=self.dock_command, hostname=None, user=None,
                                                    detach=True, stdin_open=True, tty=True, mem_limit=0,
                                                    ports=[22, 80], environment=None, dns=None, volumes=[self.brick_mount],
                                                    volumes_from=None, network_disabled=False, name=None,
                                                    entrypoint=None, cpu_shares=None, working_dir=None)

                else:
                    self.dock_command = '/bin/bash'
                    self.container_id = self.dc.create_container(
                                                    self.c_tag, command=self.dock_command, hostname=None, user=None,
                                                    detach=True, stdin_open=True, tty=True, mem_limit=0,
                                                    ports=[22, 80], environment=None, dns=None, volumes=None,
                                                    volumes_from=None, network_disabled=False, name=None,
                                                    entrypoint=None, cpu_shares=None, working_dir=None)

                self.cons_ids.append(self.container_id)
            logger.debug("Container Ids : %s" ,self.cons_ids)
            if not self.cons_ids:
                logger.critical( "Failed when creating Containers")
                return False

            for ids in self.cons_ids:
                try:
                    if self.gflag:
                        #For legacy reasons :), keeping below comments.
                        #self.brick_ext += 1
                        #self.brick_mount = '/rhs_bricks/brick'+str(self.brick_ext)
                        self.brick_source = self.brick_set[self.brick_ext]
                        self.brick_ext += 1

                    # TODO : look at other options
                    #mostly need to link these containers using link option
                    #http://blog.docker.io/2013/10/docker-0-6-5-links-container-naming-advanced-port-redirects-host-integration/
                    #regarding lxc_conf you can give me in dict formats
                    #also it helps when changing to static ips .etc
                    #-n flag looks similar:
                    #https://github.com/dotcloud/docker-py/issues/79

                        ret = self.dc.start(
                                            ids, binds={self.brick_source:self.brick_mount}, port_bindings={22: None, 80: None},
                                            lxc_conf=None,publish_all_ports=False, links=None, privileged=True)
                    else:

                        ret = self.dc.start(ids, binds=None, port_bindings={22: None, 80: None}, lxc_conf=None,
                                            publish_all_ports=False, links=None, privileged=True)
                    logger.debug("Container with ID :%s is started", ids)
                    time.sleep(10)

                    # TODO: maybe I need to commit these containers later with a different workflow.
                except Exception as e:
                    logger.critical("Exception raised when starting Container with id:%s", ids)

                    logger.error(e)
                    return False


            #TODO: This may not be the right place to put list containers
            #and capturing the requested information
            #logger.debug "Going to list the containers"
            #cons =  self.dc.containers(quiet=False, all=False, trunc=True, latest=False, since=None,
            #                   before=None, limit=-1)
            #for c in cons:
            #   self.c_id =  dict(cons[0])['Id']
            #   self.cons_ids.append( self.c_id)

            logger.info("  Information about running containers ")

            for ids in self.cons_ids:
                try:
                    insp_obj = self.dc.inspect_container(ids)
                    hostname = insp_obj['Config']['Hostname']
                    ipaddr = insp_obj['NetworkSettings']['IPAddress']
                    if not ipaddr:
                        logger.critical("Not able to get IP address of %s", hostname)

                    self.container_ips.append(ipaddr)
                except Exception as e:
                    logger.critical('Exception raised when inspecting Containers')
                    logger.debug(e)

                    return False

        except Exception as e:

            logger.critical('Exception raised when creating/starting Containers')
            logger.debug(e)
            return False

        return True
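A hedged sketch of driving runC() with a gluster brick layout; the dict keys follow the gluster_dict lookups in the method, while the paths, tag and the `cli` object are placeholders:

gluster_dict = {
    'BRICKS': '/bricks/b1,/bricks/b2',   # one host path per container
    'SERVER_EXPORT_DIR': '/rhs_bricks',  # bind-mount target inside each container
}
# cli: a DockerCli instance set up for two containers (hypothetical)
ok = cli.runC('fedora:latest', 1, gluster_dict)
if ok:
    print cli.container_ips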
Example #16
def talktoDocker(
        pulloption, baseimage, imagetag, numcontainers, dockerfile,
        dockerrepo, buildoption, startoption, gluster_mode,
        gluster_install, gluster_volume):

    new_image_tag = ''
    flag = flag1 = gluster_flag = 0
    cons_ids = []
    logger.debug(
        "Docker image name :%s \t Image Tag:%s \t number of Containers:%s",
        baseimage, imagetag, numcontainers)

    try:
        connret = dockit.DockerCli(
            "connect", pulloption, baseimage, imagetag, numcontainers,
            dockerfile, dockit_log_file, dockerrepo, buildoption)
        if connret:
            logger.info("Successfully connected to docker deamon: \n"
                        "\t \t \t pull/build/start containers accordingly.")

        else:
            logger.error("Connection return failed..exiting.")

            sys.exit(1)

        if pulloption:
            logger.debug("Proceeding with actions on Image:%s", baseimage)
            # if dockerrepo == None:
            #    logger.debug(
            #       "Base image pulling is not supported with "
            #       "this version of dockit \n"
            #       " please provide dockerrepo")
            #    sys.exit(1)
            pullret = connret.pullC()
            if pullret:
                logger.info("Done with pulling.. continuing")
                if dockerrepo and baseimage:
                    new_image_tag = dockerrepo+'/'+baseimage+':'+'latest'
                    flag1 = 1
                logger.debug("new_image_tag:%s", new_image_tag)
            else:
                logger.error("Error when pulling ")
        else:
            logger.info("Not trying to pull image:%s.. continuing", baseimage)
        if buildoption:
            logger.debug("Continuing build process with %s", dockerfile)

            built_image = connret.buildC()
            if built_image:
                logger.info(
                    " Image built from docker file :%s with id:%s and tag:%s",
                    built_image, built_image['Id'], built_image['RepoTags'])
                if imagetag:
                    logger.debug("Image tag:%s", imagetag)
                    new_image_tag = imagetag+':latest'
                    flag = 1
                logger.debug("new_image_tag:%s", new_image_tag)

            else:
                logger.error(
                    "Failed when building from docker file:\n"
                    "Check docker file path and options ")

        else:
            logger.debug("Not trying to build the image from docker file")

        if startoption:

            if flag or flag1:
                logger.debug("Flag:%s \t Flag1:%s image tag:\t %s",
                             flag, flag1, new_image_tag)

            else:
                if baseimage and imagetag:
                    new_image_tag = baseimage+':'+imagetag
                logger.debug("Using image tag :%s", new_image_tag)

            ret_exist = connret.image_by_tag(new_image_tag)

            if ret_exist:
                logger.debug("Image exists :%s with ID:%s  ",
                             ret_exist, ret_exist['Id'])
                logger.info("Going to run the containers")

                if gluster_mode:
                    if gluster_volume:
                        gluster_flag = 1
                    else:
                        gluster_flag = 0
                runret = connret.runC(
                    ret_exist['RepoTags'][0], gluster_flag, gluster_config)
                if runret:
                    if not connret.container_ips:
                        logger.critical(
                            "Something went wrong when spawning "
                            "containers:exiting")
                        sys.exit(1)

                    logger.info(
                        "Containers are running successfully.."
                        "please login and work!!!!")
                    print (60 * '-')
                    logger.info("Details about running containers..\n")
                    logger.info(
                        "Container IPs \t : %s\n ", connret.container_ips)

                    for c in connret.cons_ids:
                        c_id = dict(c)['Id']
                        cons_ids.append(c_id)
                    logger.info("Container Ids \t : %s \n ", cons_ids)
                    print (60 * '-')
                    # todo : Its possible to auto login to these containers
                    # via below , commenting it out for now
                    # loginC(connret.container_ips, connret.cons_ids)
                    if gluster_mode:
                        gluster_cli = create_vol.glusteractions()

                        if gluster_cli:
                            logger.debug("Successfully created gluster client")
                            run_helper.rh_config_dict[
                                'SERVER_IP_ADDRS'] = connret.container_ips
                        else:
                            logger.error("Failed to create gluster client")
                        run_helper.con_pass = getpass.getpass()
                        if gluster_install:
                            ginst = gluster_config.get(
                                'GLUSTER_VERSION', '3.5')
                            if ginst:
                                gluster_cli.gluster_install(ginst)
                            else:
                                logger.debug(
                                    "Failed to get Gluster Version from dict.")
                        else:
                            logger.info("Gluster installation not required")
                        if gluster_volume:

                            run_helper.rh_config_dict[
                                'VOL_TYPE'] = gluster_config['VOL_TYPE']
                            run_helper.rh_config_dict['SERVER_EXPORT_DIR'] = \
                                gluster_config['SERVER_EXPORT_DIR']
                            run_helper.rh_config_dict['TRANS_TYPE'] = 'tcp'
                            run_helper.rh_config_dict[
                                'VOLNAME'] = gluster_config['VOLNAME']
                            logger.debug(
                                "Successfully filled configuration details:%s",
                                run_helper.rh_config_dict)
                            gluster_cli.create_gluster_volume(start=True)
                            logger.info(
                                'Gluster Volume operations done! '
                                'Please mount volume :%s in your client',
                                gluster_config['VOLNAME'])
                        else:
                            logger.debug(
                                "Gluster Volume creation not required")
                    else:
                        logger.info("Done!")
                else:
                    logger.error("Failed when starting/inspecting containers")
            else:
                logger.error(
                    "Image + tag does not exist.. "
                    "I can't start a container from this..exiting")

                sys.exit(1)
        else:
            logger.debug("Not trying to start containers..")
            logger.info("Dockit finished...")
            return True

    except Exception as e:
        logger.critical("Failed on :%s", e)
        sys.exit(1)
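For reference, a talktoDocker() call wired up the way the function's own branches expect, with the pull and build steps skipped (all argument values are illustrative):

talktoDocker(
    pulloption=False,      # don't pull the base image
    baseimage='fedora',
    imagetag='latest',
    numcontainers=2,
    dockerfile=None,
    dockerrepo=None,
    buildoption=False,     # don't build from a dockerfile
    startoption=True,      # create and start the containers
    gluster_mode=False,
    gluster_install=False,
    gluster_volume=False)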
Example #17
    def checkprereq(self):
        """
        This function will check for pre-req packages
        which need to be installed and if its not available
        it will be installed
        """

        rhelflag = 0
        if sysdict['dist'] == "fedora":
            req_pcks = fedora_req_pcks
            rhelflag = 1
        elif sysdict['dist'] == "redhat":
            if sysdict['ver'] < 7:
                req_pcks = rhel_req_pcks
            else:
                req_pcks = rhel7_req_pcks
            rhelflag = 1
        elif sysdict['dist'] == "centos":
            if sysdict['ver'] < 7:
                req_pcks = centos_req_pcks
            else:
                req_pcks = centos7_req_pcks
            rhelflag = 1
        elif sysdict['dist'] == "Ubuntu":
            req_pcks = ubuntu_req_pcks
        else:
            logger.error("Unknown Distribution.")
            sys.exit(1)

        logger.info(
            "Distribution:%s Required %s packages \n\t \t \t "
            "Making yum transactions", sysdict['dist'], req_pcks)

        if rhelflag == 1:
            try:
                import yum
            except Exception as e:
                print "Error when importing yum module"
                sys.exit(1)
            yb = yum.YumBase()
            yb.conf.cache = os.geteuid() != 0  # rely on the yum cache only when not running as root
            for pck in req_pcks:
                if yb.rpmdb.searchNevra(name=pck):
                    logger.info("%s -> Installed" % (pck))
                    avail_pcks.append(pck)
                else:
                    logger.info("%s -> not installed" % (pck))
                    mis_pcks.append(pck)
                    if not self.skipflag:
                        try:
                            if pck == "python-docker-py":
                                logger.debug("Trying with pip")
                                cmd = "sudo pip install {0} -U "
                                cmd += ">/dev/null".format("docker-py")
                                os.system(cmd)
                                mis_pcks.remove(pck)
                            else:
                                logger.info(
                                    "Unknown package for me to install "
                                    "via pip.. Proceeding")
                        except Exception as e:
                            logger.error(e)
                            logger.error(
                                "Error occurred when trying to install %s "
                                "using pip -> Try to install "
                                "manually" % (pck))
                            sys.exit(1)
                        try:
                            yb.install(name=pck)
                            time.sleep(5)
                        except yum.Errors.InstallError as err:
                            logger.error(
                                "exiting : Error when installing "
                                "package %s", pck)
                            logger.error("%s", (str(err)))
                            sys.exit(1)
                        except Exception as e:
                            logger.critical(e)
                            logger.error(
                                "Error occurred when trying to install %s "
                                "-> Try to install manually" % (pck))
                            sys.exit(1)
Example #18
    def runC(
        self,
        image_tag,
        gl_flag,
        gluster_dict={},
    ):
        """
        Run the container
        Creates a container that can then be `start`ed.
        Parameters are similar to those for the `docker run` command
        except it doesn't support the attach options (`-a`). See "Port bindings" and "Using volumes"  for
        more information on how to create port bindings and volume mappings.
        """

        #self.command="exec >/dev/tty 2>/dev/tty </dev/tty && /usr/bin/screen -s /bin/bash"

        self.c_tag = image_tag
        self.container_id = ""
        self.info_array = ["Hostname", "NetworkSettings"]
        self.brick_ext = 0
        self.gflag = gl_flag
        self.brick_set = []
        logger.info(" Create and start containers with image :%s ", self.c_tag)

        if self.gflag:
            bricks = gluster_dict['BRICKS']
            self.brick_set = []
            for b in bricks.split(','):
                self.brick_set.append(b)

            logger.info("Bricks will be using in order:%s", self.brick_set)

        logger.info("Enable Gluster Volume :%s", self.gflag)
        try:

            for num in range(0, self.dock_numc):

                if self.gflag:
                    self.brick_mount = gluster_dict['SERVER_EXPORT_DIR']
                    if len(self.brick_set) < self.dock_numc:

                        logger.critical(
                            "Number of bricks given to me is less than the "
                            "number of nodes, check configfile")
                        return False
                    else:
                        print "..."

                    self.dock_command = ''
                    self.container_id = self.dc.create_container(
                        self.c_tag,
                        command=self.dock_command,
                        hostname=None,
                        user=None,
                        detach=True,
                        stdin_open=True,
                        tty=True,
                        mem_limit=0,
                        ports=[22, 80],
                        environment=None,
                        dns=None,
                        volumes=[self.brick_mount],
                        volumes_from=None,
                        network_disabled=False,
                        name=None,
                        entrypoint=None,
                        cpu_shares=None,
                        working_dir=None)

                else:
                    self.dock_command = '/bin/bash'
                    self.container_id = self.dc.create_container(
                        self.c_tag,
                        command=self.dock_command,
                        hostname=None,
                        user=None,
                        detach=True,
                        stdin_open=True,
                        tty=True,
                        mem_limit=0,
                        ports=[22, 80],
                        environment=None,
                        dns=None,
                        volumes=None,
                        volumes_from=None,
                        network_disabled=False,
                        name=None,
                        entrypoint=None,
                        cpu_shares=None,
                        working_dir=None)

                self.cons_ids.append(self.container_id)
            logger.debug("Container Ids : %s", self.cons_ids)
            if not self.cons_ids:
                logger.critical("Failed when creating Containers")
                return False

            for ids in self.cons_ids:
                try:
                    if self.gflag:
                        self.brick_source = self.brick_set[self.brick_ext]
                        self.brick_ext += 1
                        ret = self.dc.start(
                            ids,
                            binds={self.brick_source: self.brick_mount},
                            port_bindings={
                                22: None,
                                80: None
                            },
                            lxc_conf=None,
                            publish_all_ports=False,
                            links=None,
                            privileged=True)
                    else:

                        ret = self.dc.start(ids,
                                            binds=None,
                                            port_bindings={
                                                22: None,
                                                80: None
                                            },
                                            lxc_conf=None,
                                            publish_all_ports=False,
                                            links=None,
                                            privileged=True)
                    logger.debug("Container with ID :%s is started", ids)
                    time.sleep(10)

                except Exception as e:
                    logger.critical(
                        "Exception raised when starting Container with id:%s",
                        ids)

                    logger.error(e)
                    return False

            #cons =  self.dc.containers(quiet=False, all=False, trunc=True, latest=False, since=None,
            #                   before=None, limit=-1)
            #for c in cons:
            #   self.c_id =  dict(cons[0])['Id']
            #   self.cons_ids.append( self.c_id)

            logger.info("  Information about running containers ")

            for ids in self.cons_ids:
                try:
                    insp_obj = self.dc.inspect_container(ids)
                    hostname = insp_obj['Config']['Hostname']
                    ipaddr = insp_obj['NetworkSettings']['IPAddress']
                    if not ipaddr:
                        logger.critical("Not able to get IP address of %s",
                                        hostname)

                    self.container_ips.append(
                        insp_obj['NetworkSettings']['IPAddress'])
                except Exception as e:
                    logger.critical(
                        'Exception raised when inspecting Containers')
                    logger.debug(e)

                    return False

        except Exception as e:

            logger.critical(
                'Exception raised when creating/starting Containers')
            logger.debug(e)
            return False

        return True
Example #19
                        sys.exit(1)
                    else:
                        try:

                            for pkgs in mis_pcks:
                                os_cmd = ("sudo apt-get install -y %s && "
                                          "sudo ln -sf /usr/bin/docker.io "
                                          "/usr/local/bin/docker >/dev/null" % pkgs)
                                if os.system(os_cmd):
                                    print "Failed again to install %s package" % (pkgs)
                                    sys.exit(1)
                        except:
                            logger.exception("Exception occurred when trying to install packages in Ubuntu system..exiting")
                            sys.exit(1)

        except Exception as e:
            logger.critical("Exiting..%s", e)

            sys.exit(1)
        return True


class Procstart:
    """
        This class is defined for running process/deamon inside the system
    """

    def __init__(self, *args, **kwargs):

        for k, w in kwargs.items():
            logger.debug("%s :%s" % (k, w))
            self.proc = w  # the last keyword value wins as the managed process
Example #20
    def runC(self, image_tag, gl_flag, gluster_dict={}):
        """
        Run the container
        Creates a container that can then be `start`ed.
        Parameters are similar to those for the `docker run` command
        except it doesn't support the attach options (`-a`). See "Port bindings" and "Using volumes"  for
        more information on how to create port bindings and volume mappings.
        """

        #self.command="exec >/dev/tty 2>/dev/tty </dev/tty && /usr/bin/screen -s /bin/bash"

        self.c_tag = image_tag
        self.container_id = ""
        self.info_array = ["Hostname", "NetworkSettings"]
        self.brick_ext = 0
        self.gflag = gl_flag
        self.brick_set = []
        logger.info(" Create and start containers with image :%s ", self.c_tag)

        if self.gflag:
            bricks = gluster_dict['BRICKS']
            self.brick_set = []
            for b in bricks.split(','):
                self.brick_set.append(b)

            logger.info("Bricks will be used in order:%s", self.brick_set)

        logger.info("Enable Gluster Volume :%s", self.gflag)
        try:

            for num in range(0,self.dock_numc):

                if self.gflag:
                    self.brick_mount = gluster_dict['SERVER_EXPORT_DIR']
                    if len(self.brick_set) < self.dock_numc:
                        logger.critical("Number of bricks given to me is less than the number of nodes, check configfile")
                        return False
                    else:
                        print "..."

                    self.dock_command = ''
                    self.container_id = self.dc.create_container(
                                                    self.c_tag, command=self.dock_command, hostname=None, user=None,
                                                    detach=True, stdin_open=True, tty=True, mem_limit=0,
                                                    ports=[22, 80], environment=None, dns=None, volumes=[self.brick_mount],
                                                    volumes_from=None, network_disabled=False, name=None,
                                                    entrypoint=None, cpu_shares=None, working_dir=None)

                else:
                    self.dock_command = '/bin/bash'
                    self.container_id = self.dc.create_container(
                                                    self.c_tag, command=self.dock_command, hostname=None, user=None,
                                                    detach=True, stdin_open=True, tty=True, mem_limit=0,
                                                    ports=[22, 80], environment=None, dns=None, volumes=None,
                                                    volumes_from=None, network_disabled=False, name=None,
                                                    entrypoint=None, cpu_shares=None, working_dir=None)

                self.cons_ids.append(self.container_id)
            logger.debug("Container Ids : %s" ,self.cons_ids)
            if not self.cons_ids:
                logger.critical( "Failed when creating Containers")
                return False

            for ids in self.cons_ids:
                try:
                    if self.gflag:
                        self.brick_source = self.brick_set[self.brick_ext]
                        self.brick_ext += 1
                        ret = self.dc.start(
                                            ids, binds={self.brick_source:self.brick_mount}, port_bindings={22: None, 80: None},
                                            lxc_conf=None,publish_all_ports=False, links=None, privileged=True)
                    else:

                        ret = self.dc.start(ids, binds=None, port_bindings={22: None, 80: None}, lxc_conf=None,
                                            publish_all_ports=False, links=None, privileged=True)
                    logger.debug("Container with ID :%s is started", ids)
                    time.sleep(10)

                except Exception as e:
                    logger.critical("Exception raised when starting Container with id:%s", ids)

                    logger.error(e)
                    return False


            #cons =  self.dc.containers(quiet=False, all=False, trunc=True, latest=False, since=None,
            #                   before=None, limit=-1)
            #for c in cons:
            #   self.c_id =  dict(cons[0])['Id']
            #   self.cons_ids.append( self.c_id)

            logger.info("  Information about running containers ")

            for ids in self.cons_ids:
                try:
                    insp_obj = self.dc.inspect_container(ids)
                    hostname = insp_obj['Config']['Hostname']
                    ipaddr = insp_obj['NetworkSettings']['IPAddress']
                    if not ipaddr:
                        logger.critical("Not able to get IP address of %s", hostname)

                    self.container_ips.append(ipaddr)
                except Exception as e:
                    logger.critical('Exception raised when inspecting Containers')
                    logger.debug(e)

                    return False

        except Exception as e:

            logger.critical('Exception raised when creating/starting Containers')
            logger.debug(e)
            return False

        return True
Example #21
    def create_gluster_volume(self, start=True):

        nodes = run_helper.get_nodes_ip()
        logger.info("nodes are %s", nodes)

        masternode = nodes[0]
        export_dir = run_helper.get_server_export_dir()
        if export_dir is None:
            export_dir = '/rhs_bricks'
        vol_type = run_helper.get_volume_type()
        if vol_type is not None:
            volconfig = re.search(r'([0-9]+)x([0-9]+)x([0-9]+)', vol_type)
            distcount = volconfig.group(1)
            repcount = volconfig.group(2)
            stripecount = volconfig.group(3)
        else:
            distcount = '2'
            repcount = '2'
            stripecount = '1'

        trans_type = run_helper.get_trans_type()
        if trans_type == '':
            trans_type = 'tcp'

        volname = run_helper.get_vol_name()
        if volname == '':
            volname = 'hosdu'

        number_nodes = len(nodes)
        logger.info("Number of nodes: %s", number_nodes)
        if distcount == '0':
            distcount = 1
        if repcount == '0':
            repcount = 1
        if stripecount == '0':
            stripecount = 1

        number_bricks = int(distcount) * int(repcount) * int(stripecount)

        logger.info("number of bricks:%s", number_bricks)

        if number_bricks > number_nodes:
            logger.critical(
                "number of bricks and number of servers don't match.\n")
            logger.critical("The support to have more than 1 brick per "
                            "container is not there yet \n")
            return 1

        if repcount == '1':
            replica_count = ''
        else:
            replica_count = "replica %s" % repcount

        if stripecount == '1':
            stripe_count = ''
        else:
            stripe_count = "stripe %s" % stripecount
        # pre_create_cleanup(nodes, export_dir)

        brick_list = []
        node_index = 0
        for i in range(0, number_bricks):
            brick = "%s:%s/%s_brick%d" % (nodes[node_index], export_dir,
                                          volname, i)
            brick_list.append(brick)
            node_index = node_index + 1
            if node_index >= number_nodes:
                node_index = 0

        vol_create_cmd = ("gluster --mode=script volume create %s %s %s "
                          "transport %s %s force" %
                          (volname, replica_count, stripe_count, trans_type,
                           ' '.join(brick_list)))

        flag = 0
        for node in nodes:
            status = run_helper.run_command(node, 'pgrep glusterd || glusterd',
                                            True)
            if status:
                logger.error('glusterd could not be started on node: %s', node)
                flag = 1

        if flag:
            logger.info(
                'glusterd could not be started successfully on all nodes. '
                'Exiting...')
            sys.exit(1)

        flag = 0
        for node in nodes:
            if node != masternode:
                status = run_helper.run_command(masternode,
                                                'gluster peer probe ' + node,
                                                False)
                time.sleep(20)
                if status:
                    logger.error('peer probe went wrong in %s', node)
                    flag = 1

        if flag:
            logger.critical(
                'Peer probe went wrong in some machines. Exiting...')
            sys.exit(1)

        status = run_helper.run_command(masternode, vol_create_cmd, True)
        if status:
            logger.critical('volume creation failed.')

        if status == 0 and start:
            status = run_helper.run_command(
                masternode, "gluster --mode=script volume start %s" % volname,
                False)

        return status
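As a worked example: with four nodes ['10.0.0.2', '10.0.0.3', '10.0.0.4', '10.0.0.5'] (placeholder IPs), VOL_TYPE '2x2x1', the default export dir and volume name, the assembled vol_create_cmd comes out as the string below (the doubled space comes from the empty stripe_count):

gluster --mode=script volume create hosdu replica 2  transport tcp 10.0.0.2:/rhs_bricks/hosdu_brick0 10.0.0.3:/rhs_bricks/hosdu_brick1 10.0.0.4:/rhs_bricks/hosdu_brick2 10.0.0.5:/rhs_bricks/hosdu_brick3 force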
Example #22
    def create_gluster_volume(self, start=True):

        nodes = run_helper.get_nodes_ip()
        logger.info( "nodes are %s" , nodes)

        masternode = nodes[0]
        export_dir = run_helper.get_server_export_dir()
        if export_dir is None:
            export_dir = '/rhs_bricks'
        vol_type = run_helper.get_volume_type()
        if vol_type is not None:
            volconfig = re.search(r'([0-9]+)x([0-9]+)x([0-9]+)', vol_type)
            distcount = volconfig.group(1)
            repcount = volconfig.group(2)
            stripecount = volconfig.group(3)
        else:
            distcount = '2'
            repcount = '2'
            stripecount = '1'

        trans_type = run_helper.get_trans_type()
        if trans_type == '':
            trans_type = 'tcp'

        volname = run_helper.get_vol_name()
        if volname == '':
            volname = 'hosdu'

        number_nodes = len(nodes)
        logger.info( "Number of nodes: %s" ,number_nodes)
        if distcount == '0':
            distcount =  1
        if repcount == '0':
            repcount =  1
        if stripecount == '0':
            stripecount =  1

        number_bricks = int(distcount) * int(repcount) * int(stripecount)

        logger.info( "number of bricks:%s" , number_bricks)

        if number_bricks > number_nodes:
            logger.critical("number of bricks and number of servers don't match.\n")
            logger.critical("The support to have more than 1 brick per container is not there yet \n")
            return 1

        if repcount == '1':
            replica_count = ''
        else:
            replica_count = "replica %s" % repcount

        if stripecount == '1':
            stripe_count = ''
        else:
            stripe_count = "stripe %s" % stripecount
        #pre_create_cleanup(nodes, export_dir)

        brick_list = []
        node_index = 0
        for i in range(0, number_bricks):
            brick = "%s:%s/%s_brick%d" % (nodes[node_index], export_dir, volname, i)
            brick_list.append(brick)
            node_index = node_index + 1
            if node_index >= number_nodes:
                node_index = 0

        vol_create_cmd = "gluster --mode=script volume create %s %s %s transport %s %s force" % (volname, replica_count, stripe_count, trans_type, ' '.join(brick_list))

        flag = 0
        for node in nodes:
            status = run_helper.run_command(node, 'pgrep glusterd || glusterd', True)
            if status:
                logger.error('glusterd could not be started on node: %s', node)
                flag = 1

        if flag:
            logger.info('glusterd could not be started successfully on all nodes. Exiting...')
            sys.exit(1)

        flag = 0
        for node in nodes:
            if node != masternode:
                status = run_helper.run_command(masternode, 'gluster peer probe ' + node, False)
                time.sleep(20)
                if status:
                    logger.error('peer probe went wrong in %s', node)
                    flag = 1

        if flag:
            logger.critical('Peer probe went wrong in some machines. Exiting...')
            sys.exit(1)

        status = run_helper.run_command(masternode, vol_create_cmd, True)
        if status:
            logger.critical('volume creation failed.')

        if status == 0 and start:
            status = run_helper.run_command(masternode, "gluster --mode=script volume start %s" % volname, False)

        return status
Example #23
    def gluster_install(self, version):
        failed_package_nodes = []
        failed_install_nodes = []

        nodes = run_helper.get_nodes_ip()
        logger.info("Trying to install gluster on %s nodes ", nodes)

        gluster_package_command = (
            'yum -y install python-devel python-setuptools deltarpm yum-utils '
            'gcc git autoconf automake bison dos2unix flex glib2-devel '
            'libaio-devel libattr-devel libibverbs-devel '
            'librdmacm-devel libtool libxml2-devel make openssl-devel '
            'pkgconfig python-devel python-eventlet python-netifaces '
            'python-paste-deploy python-simplejson python-sphinx '
            'python-webob pyxattr readline-devel rpm-build gdb dbench '
            'net-tools systemtap-sdt-devel attr psmisc findutils which '
            'xfsprogs yajl-devel lvm2-devel e2fsprogs mock nfs-utils '
            'openssh-server supervisor openssl fuse-libs wget >/dev/null')
        # gluster_package_command='ls'
        # gluster_install_command = 'cd /root/glusterfs && make install'
        gluster_install_command = (
            "rm -rf /root/glusterfs && cd /root && git clone "
            "git://review.gluster.org/glusterfs && cd glusterfs "
            "&& git checkout -b {0} origin/release-{1}  && "
            "./autogen.sh >/dev/null && ./configure>/dev/null "
            "&& make >/dev/null && make install> /dev/null ".format(
                version, version))

        for node in nodes:
            status1 = status2 = 0
            logger.info("Configuring/installing on node:%s", node)
            status1 = run_helper.run_command(node, gluster_package_command,
                                             True)

            if status1:
                logger.error(
                    'Required Gluster package installation '
                    'failed on node: %s', node)
                failed_package_nodes.append(node)
            else:
                logger.info("Continuing ..")
                status2 = run_helper.run_command(node, gluster_install_command,
                                                 True)
                time.sleep(20)
                if status2:
                    logger.error("Failed to configure GlusterFs from "
                                 "source repository ")
                    failed_install_nodes.append(node)
                else:
                    logger.info(
                        "Successfully configured GlusterFS "
                        "binary on node:%s", node)

        if failed_package_nodes or failed_install_nodes:
            logger.critical(
                "Failed to install gluster packages on:%s or GlusterFs "
                "binary installation failed on :%s ", failed_package_nodes,
                failed_install_nodes)
        else:
            logger.info(
                "Successful Gluster Package Installation and GlusterFS "
                "Binary installation on all the nodes!")
        return
Example #24
    def buildC(self):
        """
                        #Eqvnt to `docker build` command. Either `path` or `fileobj` needs
                        #to be set. `path` can be a local path (to a directory containing a
                        #Dockerfile) or a remote URL. `fileobj` must be a readable file-like
                        #object to a Dockerfile.

                        # Best option would be to create a docker file according to the input
                        # provided from the command line:
                        # That said, suppose if we specify fedora 2*2 it should spawn
                        # 4 containers with gluster packages installed

                        # An easy way of doing this is ship repos with glusterd running
                        # for different distros. so that that image can be mentioned in
                        # in the docker file.
        """
        #todo: Add support for github url

        self.waitsecs = 15  # number of one-minute polling attempts
        logger.debug("Timeout for build process has been set to :%s minutes",
                     self.waitsecs)
        try:
            logger.info("Working on docker file :%s .. Please wait..",
                        self.dock_filepath)
            if self.dock_filepath:
                logger.info(
                    "Build it with dockerfile:%s \t and Tag: %s and self.dc: %s .."
                    "need to wait ..", self.dock_filepath, self.dock_tag,
                    self.dc)
                #ret = self.dc.build( path=self.dock_filepath, tag=self.dock_tag, quiet=False, fileobj=None, nocache=False,
                #                   rm=False, stream=False)
                ret = self.dc.build(path=self.dock_filepath, tag=self.dock_tag)

                if ret:
                    while self.waitsecs >= 0:
                        time.sleep(60)
                        logger.debug("Fetching docker images and tags for %s",
                                     self.dock_tag)

                        # More optimization can be as shown below.. how-ever disabling it for now:
                        #if next((image for image in self.dc.images() if self.dock_tag in image['RepoTags']), None):
                        #    logger.debug("Image found with tag")
                        #    return True
                        #else:
                        #    logger.debug("Failed to find requested tagged image in first attempt")
                        #    self.waitsecs =- 180

                        self.images = self.dc.images(name=None,
                                                     quiet=False,
                                                     all=False,
                                                     viz=False)
                        for im in self.images:
                            for tag in im['RepoTags']:
                                if self.dock_tag in str(tag):
                                    logger.info(
                                        "dock_tag:%s  successfully built and available in repo",
                                        self.dock_tag)
                                    #return True
                                    return im

                        self.waitsecs -= 1

                    return False

                else:
                    logger.critical("Failed to build docker image")
                    return False
            else:
                logger.critical("I am sorry, I can't build without a dockerfile")
                return False

        except Exception as e:
            logger.debug("Failed build: ...")
            logger.debug(e)
            return False