Code example #1
# Assumes the usual Fabric 1.x imports:
# from fabric.api import env, hide, settings
# from fabric.state import connections
def reconnect(wait=120):
    # Shorter timeout for a more granular retry cycle than the default.
    timeout = 5
    # Treat 'wait' as the max total wait time; cast 'wait' explicitly in
    # case it arrives as a string from the fab command line.
    attempts = int(round(float(wait) / float(timeout)))
    with settings(hide('running'),
                  timeout=timeout,
                  connection_attempts=attempts):
        connections.connect(env.host_string)
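A minimal usage sketch for the helper above, assuming the standard Fabric 1.x imports; the task name and the sshd restart command are illustrative assumptions, not part of the original:

from fabric.api import run, sudo

def bounce_sshd():
    # Hypothetical task: restarting sshd drops the cached session, so we
    # force a reconnect before issuing further commands.
    sudo('service ssh restart')
    reconnect(wait=60)
    run('uptime')  # runs over the re-established connection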
Code example #2
def reboot_and_wait(wait=600, command='reboot'):
    """Reboot the remote system.

    Args:
        wait: Maximum time in seconds to wait for the remote system after reboot.
        command: Command used to reboot the remote system.

    Returns:
        None
    """
    # Shorter timeout for a more granular cycle than the default.
    timeout = 10
    # Use 'wait' as max total wait time
    attempts = int(round(float(wait) / float(timeout)))
    # Don't bleed settings, since this is supposed to be self-contained.
    # User adaptations will probably want to drop the "with settings()" and
    # just have globally set timeout/attempts values.
    with settings(timeout=timeout,
                  connection_attempts=attempts,
                  warn_only=True):
        run(command)
        # Try to make sure we don't slip in before pre-reboot lockdown
        time.sleep(10)
        # This is actually an internal-ish API call, but users can simply drop
        # it in real fabfile use -- the next run/sudo/put/get/etc call will
        # automatically trigger a reconnect.
        # We use it here to force the reconnect while this function is still in
        # control and has the above timeout settings enabled.
        connections.connect(env.host_string)
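A hedged usage sketch for reboot_and_wait: since warn_only is enabled inside the helper, a failing reboot command will not abort the task, so the caller should verify the host afterwards (the task below is an assumption, not part of the original):

from fabric.api import run, sudo

def upgrade_and_reboot():
    # Hypothetical task: install updates, reboot, then confirm the host
    # came back within the 10-minute default window.
    sudo('apt-get -y upgrade')
    reboot_and_wait()
    run('uname -r')  # next call reuses the reconnected session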
Code example #3
File: util.py  Project: ushamani/contrail-test
def wait_for_ssh_on_node(host_string, password=None, logger=None):
    logger = logger or contrail_logging.getLogger(__name__)
    try:
        with settings(host_string=host_string, password=password):
            fab_connections.connect(host_string)
    except Exception as e:
        # There can be different kinds of exceptions; catch them all.
        logger.debug('Host: %s, password: %s Unable to connect yet. Got: %s' %
                     (host_string, password, e))
        return False
    # Falling through means the connect succeeded.
    return True
Code example #4
File: util.py  Project: Juniper/contrail-test
def wait_for_ssh_on_node(host_string, password=None, logger=None):
    logger = logger or contrail_logging.getLogger(__name__)
    try:
        with settings(host_string=host_string, password=password):
            fab_connections.connect(host_string)
    except Exception as e:
        # There can be different kinds of exceptions; catch them all.
        logger.debug('Host: %s, password: %s Unable to connect yet. Got: %s' %
                     (host_string, password, e))
        return False
    return True
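Both contrail-test variants attempt a single connect and return False on failure, leaving retries to the caller. A minimal polling wrapper under that assumption (the retry count and interval are invented for illustration):

import time

def wait_until_reachable(host_string, password=None, retries=30, interval=10):
    # Hypothetical wrapper: poll wait_for_ssh_on_node until it succeeds
    # or the retry budget is exhausted.
    for _ in range(retries):
        if wait_for_ssh_on_node(host_string, password):
            return True
        time.sleep(interval)
    return False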
Code example #5
File: client.py  Project: zhengxueying/yuki
def connect_host(self, host):
    # Excerpted from a class; retries the connection for up to ~250s.
    wait = 250
    timeout = 5
    attempts = int(round(float(wait) / float(timeout)))
    with settings(hide('running'),
                  timeout=timeout,
                  connection_attempts=attempts):
        time.sleep(5)
        print("try connect to host %s" % host)
        connections.connect(host)
    return self
Code example #6
File: operations.py  Project: GoodDingo/fabric
def reboot(wait=120):
    """
    Reboot the remote system.

    Will temporarily tweak Fabric's reconnection settings (:ref:`timeout` and
    :ref:`connection-attempts`) to ensure that reconnection does not give up
    for at least ``wait`` seconds.

    .. note::
        As of Fabric 1.4, the ability to reconnect partway through a session no
        longer requires use of internal APIs.  While we are not officially
        deprecating this function, adding more features to it will not be a
        priority.

        Users who want greater control
        are encouraged to check out this function's (6 lines long, well
        commented) source code and write their own adaptation using different
        timeout/attempt values or additional logic.

    .. versionadded:: 0.9.2
    .. versionchanged:: 1.4
        Changed the ``wait`` kwarg to be optional, and refactored to leverage
        the new reconnection functionality; it may not actually have to wait
        for ``wait`` seconds before reconnecting.
    """
    # Shorter timeout for a more granular cycle than the default.
    timeout = 5
    # Use 'wait' as max total wait time
    attempts = int(round(float(wait) / float(timeout)))
    # Don't bleed settings, since this is supposed to be self-contained.
    # User adaptations will probably want to drop the "with settings()" and
    # just have globally set timeout/attempts values.
    with settings(
        hide('running'),
        timeout=timeout,
        connection_attempts=attempts
    ):
        sudo('reboot')
        # Try to make sure we don't slip in before pre-reboot lockdown
        time.sleep(5)
        # This is actually an internal-ish API call, but users can simply drop
        # it in real fabfile use -- the next run/sudo/put/get/etc call will
        # automatically trigger a reconnect.
        # We use it here to force the reconnect while this function is still in
        # control and has the above timeout settings enabled.
        connections.connect(env.host_string)
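This is the stock Fabric 1.4 fabric.operations.reboot, so a fabfile can rely on the next operation reusing the re-established connection. A sketch of typical use (the task body is an assumption):

from fabric.api import reboot, run, sudo, task

@task
def patch_and_reboot():
    # Hypothetical task: apply updates, reboot, then verify uptime over
    # the automatically re-established connection.
    sudo('apt-get -y upgrade')
    reboot(wait=180)
    run('uptime')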
Code example #7
File: fabfile.py  Project: brainu/NewsBlur
def setup_ulimit():
    # Increase file descriptor limits.
    # Note: the exported FILEMAX only lives for this single run() call;
    # every run()/sudo() starts a fresh shell on the remote side.
    run('export FILEMAX=`sysctl -n fs.file-max`', pty=False)
    sudo('mv /etc/security/limits.conf /etc/security/limits.conf.bak', pty=False)
    sudo('touch /etc/security/limits.conf', pty=False)
    sudo('chmod 666 /etc/security/limits.conf', pty=False)
    run('echo "root soft nofile 100000" >> /etc/security/limits.conf', pty=False)
    run('echo "root hard nofile 100000" >> /etc/security/limits.conf', pty=False)
    run('echo "* soft nofile 100000" >> /etc/security/limits.conf', pty=False)
    run('echo "* hard nofile 100090" >> /etc/security/limits.conf', pty=False)
    sudo('chmod 644 /etc/security/limits.conf', pty=False)
    sudo('chmod 666 /etc/sysctl.conf', pty=False)
    run('echo "fs.file-max = 100000" >> /etc/sysctl.conf', pty=False)
    sudo('chmod 644 /etc/sysctl.conf', pty=False)
    sudo('sysctl -p')
    sudo('ulimit -n 100000')
    # Reconnect so subsequent commands run in a fresh session that picks
    # up the new limits (limits.conf is only read at login).
    connections.connect(env.host_string)
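The closing connections.connect forces a fresh login session, which matters because limits.conf is only consulted at login time. A hedged follow-up check (hypothetical, not in the NewsBlur fabfile):

from fabric.api import run

def check_ulimit():
    # Hypothetical verification task: the new soft limit should be
    # visible in a freshly created session.
    result = run('ulimit -n', pty=False)
    assert result.strip() == '100000', 'nofile limit not applied'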
Code example #8
File: fabfile.py  Project: lzyzsd/NewsBlur
def setup_ulimit():
    # Increase File Descriptor limits.
    run('export FILEMAX=`sysctl -n fs.file-max`', pty=False)
    sudo('mv /etc/security/limits.conf /etc/security/limits.conf.bak', pty=False)
    sudo('touch /etc/security/limits.conf', pty=False)
    sudo('chmod 666 /etc/security/limits.conf', pty=False)
    run('echo "root soft nofile 100000" >> /etc/security/limits.conf', pty=False)
    run('echo "root hard nofile 100000" >> /etc/security/limits.conf', pty=False)
    run('echo "* soft nofile 100000" >> /etc/security/limits.conf', pty=False)
    run('echo "* hard nofile 100090" >> /etc/security/limits.conf', pty=False)
    sudo('chmod 644 /etc/security/limits.conf', pty=False)
    sudo('chmod 666 /etc/sysctl.conf', pty=False)
    run('echo "fs.file-max = 100000" >> /etc/sysctl.conf', pty=False)
    sudo('chmod 644 /etc/sysctl.conf', pty=False)
    sudo('sysctl -p')
    sudo('ulimit -n 100000')
    connections.connect(env.host_string)
Code example #9
File: api.py  Project: aychedee/kubrick
def reboot(self):
    # Excerpted from a class (Python 2 print statements, as in the
    # original project).
    self.run('reboot')
    time.sleep(30)
    print "Reconnecting",
    sys.stdout.flush()
    for retry in range(60):
        try:
            print ".",
            sys.stdout.flush()
            env.host_string = self.host_string
            connections.connect(env.host_string)
            break
        except Exception:
            # Was a bare 'except:'; narrowed so KeyboardInterrupt and
            # SystemExit still propagate.
            print "-",
            sys.stdout.flush()
            time.sleep(3)
    print
Code example #10
def reboot(wait=120, host=None):
    # Shorter timeout for a more granular cycle than the default.
    timeout = 5
    # Use 'wait' as max total wait time
    attempts = int(round(float(wait) / float(timeout)))
    # Set host if not given. Note that settings(host=...) only overrides
    # env.host; the connect() call below still keys off env.host_string.
    if not host:
        host = env.host
    with settings(hide('running'),
                  timeout=timeout,
                  connection_attempts=attempts,
                  host=host):
        sudo('reboot')
        # Try to make sure we don't slip in before pre-reboot lockdown
        time.sleep(30)
        # This is actually an internal-ish API call, but users can simply drop
        # it in real fabfile use -- the next run/sudo/put/get/etc call will
        # automatically trigger a reconnect.
        # We use it here to force the reconnect while this function is still in
        # control and has the above timeout settings enabled.
        connections.connect(env.host_string)
Code example #11
File: utils.py  Project: qz267/fabtest
def force_ssh_reconnect():
    connections.connect(env.host_string)
Code example #12
File: utils.py  Project: kmike/fabtest
def force_ssh_reconnect():
    connections.connect(env.host_string)
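A hypothetical example of where forcing a reconnect is useful, e.g. in tests that restart sshd mid-session (the commands below are assumptions, not from fabtest):

from fabric.api import run, sudo

def restart_sshd_and_verify():
    # Hypothetical: the cached connection goes stale once sshd restarts;
    # force a reconnect before the next command.
    sudo('service ssh restart')
    force_ssh_reconnect()
    run('echo reconnected')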
Code example #13
File: smgr_common.py  Project: rishiv/contrail-test
    def reimage(self, no_pkg=False):
        """ using svrmgr, reimage all the nodes """

        result = True
        image_id = self.get_image_id()
        pkg_id = self.get_pkg_id()
        cluster_id = self.get_cluster_id()
        svrmgr = self.get_svrmgr()

        with settings(host_string=svrmgr, warn_only=True):
            run('server-manager show all | python -m json.tool')
            if no_pkg:
                output = run('server-manager reimage --cluster_id %s %s'
                             % (cluster_id, image_id))
            else:
                output = run('server-manager reimage --package_image_id %s '
                             '--cluster_id %s %s' % (pkg_id, cluster_id, image_id))
            if "reimage issued" not in output:
                self.logger.warn("Reimage command was not successful")

        if not self.verify_server_status("reimage_issued"):
            self.logger.error('server status "reimage_issued" not correctly updated')
            result = result and False
        self.logger.info("Server rebooted. Going to sleep for %d seconds..." % REIMAGE_WAIT)
        sleep(REIMAGE_WAIT)

        user = "******"
        server_state = {}

        server_file = self.get_server_file()
        with open(server_file, 'r') as in_file:
            server_dict = json.loads(in_file.read())

        for node in server_dict['server']:
            server_ip = node['ip_address']
            server_state[server_ip] = False

        for retry in range(SERVER_RETRY_TIME):
            for node in server_dict['server']:
                server_ip = node['ip_address']
                if not verify_sshd(server_ip, user, env.password):
                    sleep(1)
                    self.logger.info("Node %s not reachable....retrying" % server_ip)
                    server_state[server_ip] = False
                else:
                    self.logger.info("Node %s is UP" % server_ip)
                    if not server_state[server_ip]:
                        target_node = '%s@%s' % (user, server_ip)
                        with settings(host_string=target_node):
                            connections.connect(env.host_string)
                        with settings(host_string=target_node):
                            output = run('uptime')
                            # Crude parse of 'uptime': right after a reboot the
                            # third token is the minutes since boot.
                            uptime = int(output.split()[2])
                            if uptime > 3:
                                raise RuntimeError('Restart failed for Host (%s)' % server_ip)
                            else:
                                self.logger.info("Node %s has rebooted and is UP now" % server_ip)
                                if not no_pkg:
                                    output = run('dpkg -l | grep contrail')
                                    match = re.search(r'contrail-fabric-utils\s+(\S+)\s+', output, re.M)
                                    if pkg_id not in match.group(1):
                                        raise RuntimeError('Reimage not able to download package %s on target node (%s)'
                                                           % (pkg_id, server_ip))
                                    match = re.search(r'contrail-install-packages\s+(\S+)\s+', output, re.M)
                                    if pkg_id not in match.group(1):
                                        raise RuntimeError('Reimage not able to download package %s on target node (%s)'
                                                           % (pkg_id, server_ip))
                                server_state[server_ip] = True
            # End for node in server_dict['server']

            cluster_state = True
            for key in server_state:
                cluster_state = cluster_state and server_state[key]
            if cluster_state:
                break
        # End for retry in range(SERVER_RETRY_TIME)

        if not cluster_state:
            raise RuntimeError('Unable to SSH to one or more hosts')

        if not self.verify_server_status("datacenter", "demo-dc", "reimage_completed"):
            result = result and False

        return result
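verify_sshd is called above but not defined in this excerpt. A minimal stand-in, assuming it only needs to report whether the SSH port is reachable (the real contrail-test helper may attempt a full login instead):

import socket

def verify_sshd(ip, user, password, port=22, timeout=5):
    # Hypothetical stand-in: user/password are accepted for signature
    # compatibility but unused; we only probe the SSH port.
    try:
        sock = socket.create_connection((ip, port), timeout)
        sock.close()
        return True
    except socket.error:
        return False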