Example #1
def ssh_push_file(src_file, dst_dir, username, address, silent=False):
    if not os.path.isfile(src_file):
        print("Not file, skip pushing " + src_file)
        return
    src_checksum = bench_utils.file_checksum(src_file)
    dst_file = os.path.join(dst_dir, os.path.basename(src_file))
    stdout_buff = []
    try:
        sh.ssh('%s@%s' % (username, address),
               "md5sum",
               dst_file,
               _out=lambda line: stdout_buff.append(line))
    except sh.ErrorReturnCode_1:
        print("Scp %s to %s" % (src_file, dst_dir))
        sh.ssh('%s@%s' % (username, address), "mkdir -p %s" % dst_dir)
        sh.scp(src_file, '%s@%s:%s' % (username, address, dst_dir))
    else:
        dst_checksum = stdout_buff[0].split()[0]
        if src_checksum == dst_checksum:
            if not silent:
                print("Equal checksum with %s and %s" % (src_file, dst_file))
        else:
            if not silent:
                print("Scp %s to %s" % (src_file, dst_dir))
            sh.scp(src_file, '%s@%s:%s' % (username, address, dst_dir))
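A minimal usage sketch for the helper above; the paths, username and address are placeholders, and os, sh and bench_utils are assumed to be imported in the surrounding module:

# push a local model file to a hypothetical ARM board; scp only runs
# when the remote checksum is missing or differs from the local one
ssh_push_file("/tmp/model.pb", "/data/local/tmp", "pi", "192.168.1.50", silent=True)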
Example #2
def logging_icer():
    """
    start a jupyter notebook (tasks to run after logging in to ICER).
    """
    global global_status, parsed_args

    ssh_develop_node = f"ssh {parsed_args.develop_login}\n"
    salloc_job = f"salloc --time={parsed_args.time} -c {parsed_args.cores} --mem-per-cpu={parsed_args.memory}\n"
    handle_permission = f"export XDG_RUNTIME_DIR=''\n"
    cd_directory = f"cd {parsed_args.directory}\n"
    start_jupyter_notebook = f"jupyter notebook --NotebookApp.token='' --port={parsed_args.port}\n"
    load_gpu_module = f"module purge;module load CUDA/9.0.176 cuDNN/7.0.2-CUDA-9.0.176;module load GCC/5.4.0-2.26 OpenMPI/1.10.3;"
    to_sleep = f"sleep 1\n"
    logger.info(f"start to execute command on icer")
    if(parsed_args.gpu):
        if(not parsed_args.node):
            stdin_list = [to_sleep, ssh_develop_node, to_sleep, salloc_job,
                          handle_permission, load_gpu_module, cd_directory, start_jupyter_notebook]
        else:
            stdin_list = [to_sleep, ssh_develop_node, to_sleep,
                          handle_permission, load_gpu_module, cd_directory, start_jupyter_notebook]
    else:
        if(not parsed_args.node):
            stdin_list = [to_sleep, ssh_develop_node, to_sleep, salloc_job,
                          handle_permission, cd_directory, start_jupyter_notebook]
        else:
            stdin_list = [to_sleep, ssh_develop_node, to_sleep,
                          handle_permission, cd_directory, start_jupyter_notebook]

    sh.ssh(f"{parsed_args.user}@hpcc.msu.edu",
           _out=ssh_interact_jupyter,  _tty_in=True, _in=stdin_list)
Example #3
def clean_up_tars(config: dict, destination: str, tar_filename: str):
    click.echo('{} Deleting local and remote tar files'.format(log_prefix()))

    # ssh -i ~/keys/hello.key [email protected] rm local_scripts.tar.gz
    ssh('-i', config['private_key_path'], destination, 'rm', tar_filename)

    os.remove(tar_filename)
Example #4
 def __init__(self, device_dict):
     """
     init device with device dict info
     :type device_dict: Device
     :param device_dict: a key-value dict that holds the device information,
                         with attributes: device_name, target_abis,
                         target_socs, system, address, username
     """
     diff = set(device_dict.keys()) - set(YAMLKeyword.__dict__.keys())
     if len(diff) > 0:
         six.print_('Wrong key detected: ')
         six.print_(diff)
         raise KeyError(str(diff))
     self.__dict__.update(device_dict)
     if self.system == SystemType.android:
         self.data_dir = PHONE_DATA_DIR
         self.interior_dir = self.data_dir + '/interior'
     elif self.system == SystemType.arm_linux:
         try:
             sh.ssh('-q', '{}@{}'.format(self.username, self.address),
                    'exit')
         except sh.ErrorReturnCode as e:
             six.print_('device connect failed, '
                        'please check your authentication')
             raise e
         self.data_dir = DEVICE_DATA_DIR
         self.interior_dir = self.data_dir + '/interior'
Example #5
 def strobe_kill(self):
     try:
         ssh("-i", self.node_config["ssh_key"],
             self.node_config["ssh_user"] + "@" + self.node_config["host"],
             self.node_config["strobe_kill_api_script"])
     except sh.ErrorReturnCode:
         pass
Example #6
def handle_ssh_tunnel():
    """
    set up an ssh tunnel once the jupyter notebook has been started.
    """
    global global_status, parsed_args

    sh.ssh("-4", "-t", "-Y",
           f"{parsed_args.user}@hpcc.msu.edu", "-L", f"{parsed_args.port}:localhost:{parsed_args.port}", "ssh", "-t", "-Y", global_status["hostname"], "-L", f"{parsed_args.port}:localhost:{parsed_args.port}", _out=ssh_interact_tunnel, _tty_in=True)
Example #7
 def stop(self):
     print('Stopping service: ', self.name)
     if settings.TEST_SERVER == 'localhost':
         pids = sh.pgrep('-f', 'manage.py runserver', _ok_code=[0, 1])
         for pid in pids:
             sh.kill(pid.rstrip())
     else:
         sh.ssh(settings.TEST_SERVER, 'sudo supervisorctl stop all')
Example #8
 def go(ec2_id):
     """
         Go on an ec2
     """
     ip = resource('ec2').Instance(ec2_id).public_ip_address
     config = Parser.parse('instances.ini')
     key_path = Security.get_default_key_path()
     sh.ssh("-i", key_path, config["INSTANCES"]["ami_user"]+"@"+ip)
Example #9
def ssh_interact(char, stdin):
    """automate connecting to an ssh server"""
    global aggregated
    stdout.write(char.encode())
    aggregated += char
    # answer the password prompt once it has been fully aggregated
    if aggregated.endswith("password: "):
        stdin.put("correcthorsebatterystaple\n")

aggregated = ""
stdout = os.fdopen(sys.stdout.fileno(), "wb", 0)
ssh("9.10.10.100", _out=ssh_interact, _out_bufsize=0, _tty_in=True)
Example #10
def ssh_push(src_path, dst_dir, username, address, silent=False):
    if os.path.isdir(src_path):
        print("Start scp dir %s=>%s, basename=%s"
              % (src_path, dst_dir, os.path.basename(src_path)))
        sh.scp("-r", src_path, '%s@%s:%s' % (username, address, dst_dir))
        tmp_dst_dir = os.path.join(dst_dir, os.path.basename(src_path))
        sh.ssh('%s@%s' % (username, address),
               "mv %s/* %s" % (tmp_dst_dir, dst_dir))
    else:
        ssh_push_file(src_path, dst_dir, username, address, silent)
Example #11
    def scan_dcms(hosts_list, topo=None, dst_network='10.193.160.0/19', ignore_ip=None, dump_file=None):
        """Returns a dictionary of all Dcms in the topology.

        SSH is used to get a list of TCP connections on every end-node
        in the topology. This list is filtered Dcm by Dcm to get all incoming
        TCP flows.

        Args:
            hosts_list: A list of dictionaries containing 'hostname' with the host to be scanned,
                        and 'node' containing the name of the Bridge running on that host
                        (or the last hop).
            topo: A Topology object.
            dst_network: A string with the network carrying the Dcm flows.
            ignore_ip: Ignores TCP flows from this IP.
            dump_file: For dumping a dict with Dcms.

        Returns:
            A dict with Dcm objects.
        """

        dcms = {}
        i = 0
        for h in hosts_list:
            host = h['hostname']
            node = h['node']
            tcp_flows = ssh(host, "/usr/sbin/ss -p dst " + dst_network).stdout.strip()
            try:
                # Get all distinct Dcm processes
                dcm_pids = ssh(host, "ps -C dcm_main -o pid --sort -pcpu --no-headers").stdout.split()
            except sh.ErrorReturnCode_1:
                continue
            ndcm = 0
            for p in dcm_pids:
                ndcm += 1
                i += 1
                inflows = []
                ippat = r'\d+\.\d+\.\d+\.\d+'
                # regex pattern to get the TCP flows (4-tuple) of a particular Dcm process
                pattern = (r'\S+\s+\S+\s+\S+\s+(' + ippat + r'):(\d*)\s*(' +
                           ippat + r'):(\d*).*"dcm_main",' + str(p) + r'.*')
                r = re.compile(pattern)
                for m in re.finditer(r, tcp_flows):
                    if ignore_ip == m.group(3):
                        continue
                    # incoming Dcm flows: (dport, sip, sport)
                    inflows.append( (int(m.group(2)), m.group(3), int(m.group(4))) )
                    # dip is the same for all flows
                    dip = m.group(1)
                dcms[host + '-' + str(ndcm)] = Dcm(i, topo, node, dip, inflows, int(p)) 

        if dump_file:
            with open(dump_file, 'wb') as output:
                pickler = pickle.Pickler(output, -1)
                pickler.dump(dcms)

        return dcms
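A hedged usage sketch for scan_dcms; the hostnames, bridge name and dump path are placeholders, and the method is assumed to be callable as shown (e.g. via its class or as a staticmethod):

hosts = [{'hostname': 'server-01', 'node': 'bridge-01'},
         {'hostname': 'server-02', 'node': 'bridge-01'}]
# scan both hosts, skip flows from one IP, and pickle the result
dcms = scan_dcms(hosts, topo=None, ignore_ip='10.193.160.1',
                 dump_file='/tmp/dcms.pkl')
for name, dcm in dcms.items():
    print(name, dcm)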
Example #12
def remove_qcow(image,
                pool='',
                cephhost='',
                cephuser='',
                cephcluster='',
                snap_naming_date_format='',
                snap_date='',
                snap='',
                noop=None):
    """ ssh to ceph node and remove a qcow from path
        qcow_temp_path/pool/imagename.qcow2
    """
    logger = logs.get_logger()
    if not snap_naming_date_format:
        snap_naming_date_format = settings.SNAP_NAMING_DATE_FORMAT
    if not snap_date:
        snap_date = settings.SNAP_DATE
    if not snap:
        snap = get_snapdate(snap_naming_date_format=snap_naming_date_format,
                            snap_date=snap_date)
    if not pool:
        pool = settings.POOL
    if not cephhost:
        cephhost = settings.CEPH_HOST
    if not cephuser:
        cephuser = settings.CEPH_USER
    # TODO use ceph cluster in path naming
    if not cephcluster:
        cephcluster = settings.CEPH_CLUSTER
    if not noop:
        noop = settings.NOOP
    temp_qcow_file = ("%s/%s/%s@%s.qcow2" %
                      (settings.QCOW_TEMP_PATH, settings.POOL, image, snap))
    logger.info("deleting temp qcow from path %s on ceph host %s" %
                (temp_qcow_file, cephhost))
    SSH_RM_QCOW_COMMAND = 'rm %s' % temp_qcow_file
    try:
        if noop:
            logger.info('NOOP: would have removed temp qcow for image %s from'
                        ' ceph host %s with command %s' %
                        (image, cephhost, SSH_RM_QCOW_COMMAND))
        else:
            sh.ssh(cephhost, SSH_RM_QCOW_COMMAND)
    except sh.ErrorReturnCode as e:
        logger.error('error removing temp qcow %s with error from ssh:' %
                     temp_qcow_file)
        logger.exception(e.stderr)
        raise
    except Exception as e:
        logger.error('error removing temp qcow %s' % temp_qcow_file)
        logger.exception(e)
        raise
    logger.info("successfully removed qcow for %s" % image)
    return True
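A minimal usage sketch; the image name and ceph host are placeholders, and settings is assumed to be configured as in the function above:

# dry run: with noop=True only the rm command is logged, nothing is deleted
remove_qcow('vm-100-disk-1',
            pool='rbd',
            cephhost='ceph-mon01.example.com',
            cephuser='cephbackup',
            noop=True)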
Example #13
 def _thread_function(unit: str) -> bool:
     click.echo(f"Executing `{core_command}` on {unit}.")
     try:
         ssh(unit, command)
         return True
     except Exception as e:
         logger = create_logger("CLI",
                                unit=get_unit_name(),
                                experiment=get_latest_experiment_name())
         logger.debug(e, exc_info=True)
         logger.error(f"Unable to connect to unit {unit}.")
         return False
Example #14
 def execute_on_opsman(self, opts, cmd):
     host = urlparse.urlparse(self.url).netloc
     from sh import ssh
     try:
         ssh("-oStrictHostKeyChecking=no",
             "-i {} ".format(opts['ssh_private_key_path']),
             "ubuntu@" + host, cmd)
     except Exception as ex:
         print "Error running", cmd
         print ex.stdout
         print ex.stderr
         raise
Example #15
    def start(self):
        print('Starting service: ', self.name)
        if settings.TEST_SERVER == 'localhost':
            self._run_manage_py('runserver', bg=True)
        else:
            sh.ssh(settings.TEST_SERVER, 'sudo supervisorctl start all')

        for i in range(5):
            time.sleep(1)
            if self.is_running():
                return

        raise Exception("Service not running after 5 seconds")
Example #16
    def _remote_host_command(self, host, command=None):
        if not host:
            print('\033[31mhost invalid!\033[0m host: {}'.format(host))
            return

        user = cmd_args['--user'] or self.config[
            'username'] or self._default_user
        host = '{}@{}'.format(user, self._match_short_host(host))

        jump = None
        if cmd_args['--jump'] and self.config.get('jump_host'):
            jump = self.config['jump_host']
        elif cmd_args['--jump-host']:
            jump = cmd_args['--jump-host']

        print('\033[1;32m=======> host: {} jump: {} command: {}\033[0m'.format(
            host, jump, command))

        try:
            if jump and command:
                sh.ssh('-At', host, '-J', jump, command, _fg=True)
            elif jump:
                sh.ssh('-At', host, '-J', jump, _fg=True)
            elif command:
                sh.ssh('-At', host, command, _fg=True)
            else:
                sh.ssh('-At', host, _fg=True)
        except sh.ErrorReturnCode:
            print('{}ERROR{}'.format(COLOR_RED, COLOR_DEFAULT))
Example #17
def deploy(host, command):
    def line(response, stdin):
        print("> ", response.rstrip())

    def error(response, stdin):
        print("! ", response.rstrip())

    def done(cmd, success, exit_code):
        print("Done: {!r}, {!r}".format(success, exit_code))

    try:
        sh.ssh(host, command, _out=line, _err=error, _done=done)

    except sh.ErrorReturnCode as e:
        print("Error during ssh:", e.exit_code)
Example #18
 def write_kafka_config(self):
     f = NamedTemporaryFile(delete=False)
     f.write(
         jinja2.Template(KAFKA_CONFIG).render(
             self.node_config["kafka_config"]).encode("utf-8"))
     f.close()
     scp(
         "-i", self.node_config["ssh_key"], f.name,
         self.node_config["ssh_user"] + "@" + self.node_config["host"] +
         ":kafka.cfg")
     os.unlink(f.name)
     path = self.node_config["kafka_config"]["path"]
     ssh("-i", self.node_config["ssh_key"],
         self.node_config["ssh_user"] + "@" + self.node_config["host"],
         f"sudo mv kafka.cfg {path}")
Example #19
 def __init__(self, device_dict):
     Device.__init__(self, device_dict)
     self.username = device_dict[YAMLKeyword.username]
     if YAMLKeyword.device_types in device_dict.keys():
         self.device_types = device_dict[YAMLKeyword.device_types]
     else:
         self.device_types = ["cpu"]
     try:
         sh.ssh('-q', '%s@%s' % (self.username, self.address),
                'exit')
     except sh.ErrorReturnCode as e:
         six.print_('device connect failed, '
                    'please check your authentication',
                    file=sys.stderr)
         raise e
Example #20
def send_scripts_to_server(config: dict, destination: str, tar_filename: str, scripts_dir: str):
    click.echo('{} Creating tar of local scripts'.format(log_prefix()))
    with tarfile.open(tar_filename, "w:gz") as tar:
        for script_path in config['scripts']:
            tar.add(script_path)
        tar.list(verbose=True)

    click.echo('{} Sending scripts to instance'.format(log_prefix()))
    destination_with_location = destination + ':.'
    # scp -i ~/keys/hello.key local_scripts.tar.gz [email protected]:.
    scp('-i', config['private_key_path'], tar_filename, destination_with_location)

    click.echo('{} Extracting tar on instance'.format(log_prefix()))
    # ssh -i ~/keys/hello.key [email protected] tar xzf local_scripts.tar.gz
    ssh('-i', config['private_key_path'], destination, 'tar', 'xzf', tar_filename)
Example #21
    def _thread_function(unit: str):
        logger.debug(f"Executing `{command}` on {unit}.")
        try:
            ssh(unit, command)
            if all_jobs:  # tech debt
                ssh(
                    unit,
                    "pio run led_intensity --intensity 0 --channel A --channel B --channel C --channel D --no-log",
                )
            return True

        except Exception as e:
            logger.debug(e, exc_info=True)
            logger.error(f"Unable to connect to unit {unit}.")
            return False
Example #22
 def _get_running_instances(self):
     process = ssh("root@%s" % self.__proxmox_host,
                   "qm",
                   "list",
                   _out=self._process_list,
                   _bg=True)
     process.wait()
Example #23
 def execute(self, name, command):
     """executes the command on the named host"""
     if name in ["localhost"]:
         r = '\n'.join(sh.sh("-c", command).split()[-1:])
     else:
         r = '\n'.join(sh.ssh(name, command).split()[-1:])
     return r
Example #24
 def execute(self, name, command):
     """executes the command on the named host"""
     if name in ["localhost"]:
         r = '\n'.join(sh.sh("-c", command).split()[-1:])
     else:
         r = '\n'.join(sh.ssh(name, command).split()[-1:])
     return r
Example #25
 def exec_command(self, command, *args, **kwargs):
     if self.system == SystemType.android:
         return sh.adb('-s', self.address, 'shell', command, *args,
                       **kwargs)
     elif self.system == SystemType.arm_linux:
         return sh.ssh('%s@%s' % (self.username, self.address), command,
                       *args, **kwargs)
Example #26
    def __call__(self, *args, **kwargs):
        self.process = ssh('{}@{}'.format(self.user, self.host), '-p', self.port,
                            *args,
                            _out=self.out_iteract, _out_bufsize=0, _tty_in=True,
                            _err=self.err_iteract, **kwargs)

        super().__call__(*args, **kwargs)
Example #27
    def start(self, wait=True, timeout=30):

        self.stop()
        log.debug(f'Opening SSH connection to {self.host}')

        self._ssh_stdout = ''
        self._password_entered = False
        self.sh = ssh(
            self.host,
            _out=self._enter_password,
            _out_bufsize=0,
            _tty_in=True,
            _unify_ttys=True,
            _long_sep=' ',
            _bg=True,
            **self.ssh_args
        )
        self.command = b' '.join(self.sh.cmd).decode()
        log.debug(self.command)

        left = int(timeout)
        if wait:
            while not self.is_connected():
                left -= 1
                if left <= 0 or not self.sh.is_alive():
                    raise SSHProxy(f'Failed to start SSHProxy {self}')
                else:
                    sleep(1)
Example #28
 def _qmgr(self, command):
     result = None
     try:
         result = ssh("{0}@{1}".format(self.user, self.host), command)
     except:
         raise RuntimeError("can not execute qmgr via ssh {0}".format(command))
     return result
Example #29
    def qstat(self, refresh=True):
        if self.pbs_qstat_data is None or refresh:
            try:
                xmldata = str(ssh("{0}@{1}".format(self.user, self.host), "qstat", "-x"))
            except:
                raise RuntimeError("can not execute pbs qstat via ssh")
            info = {}

            try:
                xmldoc = minidom.parseString(xmldata)

                itemlist = xmldoc.getElementsByTagName('Job')
                for item in itemlist:
                    job = {}
                    for attribute in item.childNodes:
                        if len(attribute.childNodes) == 1:
                            job[attribute.nodeName] = attribute.firstChild.nodeValue
                        else:
                            job[attribute.nodeName] = {}
                            for subchild in attribute.childNodes:
                                job[attribute.nodeName][subchild.nodeName] = subchild.firstChild.nodeValue

                    info[job['Job_Id']] = job
            except:
                pass
            self.pbs_qstat_data = info
        #return self.pbs_qstat_data

        #pprint (self.pbs_qstat_data)

        if self.cluster_queues is None:
            return {self.host: self.pbs_qstat_data}
        else:
            return self.qstat_extract(self.cluster_queues, self.pbs_qstat_data)
Example #30
def ssh_command(key, dns, command):
    try:
        res = ssh("-oConnectTimeout=15", "-i", key, cluster_config.USER + "@" + dns, command)
        return res
    except Exception as e:
        print "Command failed", e
        return 
Example #31
    def qinfo(self, refresh=True):
        '''
        returns qstat -Q -f in dict format
        
        :param refresh: refreshes the qinfo
        :type refresh: Boolean
        '''

        if self.pbs_qinfo_data is None or refresh:
            try:
                result = ssh("{0}@{1}".format(self.user, self.host),
                             "qstat -Q -f")
            except:
                raise RuntimeError("can not execute pbs qstat via ssh")

            d = {}

            # sanitize block

            result = result.replace("\n\t", "")

            result = result.replace('resources_assigned.',
                                    'resources_assigned_')
            result = result.replace('resources_default.', 'resources_default_')
            result = result.replace('resources_max.', 'resources_max_')

            for block in result.split("\n\n")[:-1]:
                block = [x.replace(" =", ":", 1) for x in block.split("\n")]
                block[0] = block[0].replace("Queue: ", "") + ":"
                queue = block[0][:-1]

                block = '\n'.join(block)

                block_yaml = yaml.safe_load(block)
                d[queue] = block_yaml[queue]

                d[queue]['queue'] = queue
                # end sanitize

                if 'state_count' in d[queue]:
                    values = [
                        x.split(":")
                        for x in d[queue]['state_count'].split(" ")
                    ]
                    d[queue]['state_count'] = {}
                    for value in values:
                        d[queue]['state_count'][value[0]] = value[1]

                if 'acl_hosts' in d[queue]:
                    # print d[queue]['acl_hosts']
                    d[queue]['acl_hosts'] = d[queue]['acl_hosts'].split("+")

            self.pbs_qinfo_data = d

        #pprint(self.qinfo_extract(self.cluster_queues, self.pbs_qinfo_data))

        if self.cluster_queues is None:
            return {self.host: self.pbs_qinfo_data}
        else:
            return self.qinfo_extract(self.cluster_queues, self.pbs_qinfo_data)
Example #32
def ssh(command, yagi):
    """Runs an ssh command on a yagi server

    Arguments:
        command: string, command to execute
        yagi: string, yagi to run the command on"""
    return sh.ssh('-i', '/home/blume/.ssh/id_ed25519', yagi, command)
Example #33
def remove_snap_status_file(snap_date,
                            cephhost='',
                            snap_status_file_path='',
                            noop=''):
    logger = logs.get_logger()
    if not cephhost:
        cephhost = settings.CEPH_HOST
    if not snap_status_file_path:
        snap_status_file_path = settings.SNAP_STATUS_FILE_PATH
    if not noop:
        noop = settings.NOOP
    REMOVE_SNAP_STATUS_FILE_COMMAND = ('rm -fv %s/%s' %
                                       (snap_status_file_path, snap_date))
    logger.info('removing snap status file on ceph host with command %s' %
                REMOVE_SNAP_STATUS_FILE_COMMAND)
    if noop:
        logger.info('would have run %s' % REMOVE_SNAP_STATUS_FILE_COMMAND)
        remove_snap_status_file_result = 'noop'
    else:
        remove_snap_status_file_result = sh.ssh(
            cephhost, REMOVE_SNAP_STATUS_FILE_COMMAND).strip('\n')
    # TODO handle some errors gracefully here
    logger.info("done removing snap status file: %s" %
                remove_snap_status_file_result)
    return True
Example #34
def task_sensors(dict_idip):
    dict_data = {}
    for cluster in dict_idip.keys():
        dict_data[cluster] = {}
        for uid in dict_idip[cluster].keys():
            ip = dict_idip[cluster][uid]
            dict_data[cluster][uid] = {"ip": ip}
            # fetch the uid-ip server's temperature
            report = ssh("-o", "ConnectTimeout=1", "-o", "ConnectionAttempts=1",
                         "user@" + ip, "sensors")
            temp = parseCpuTemperature(report)
            dict_data[cluster][uid]["temp"] = temp[0]

    # write current temperature data to mongo db

    # get the highest cpu temperature through parsing the output of 'sensors'
    # return is a list including 2 elems, [36.0, C] or [36.0, F]
    # C or F is the unit name of temperature
    def parseCpuTemperature(self, values):
        lines = values.split("\n")
        cpu_lines = [x for x in lines if x.find("(high") > -1]
        tunit = "C"
        tmax = -1
        for line in cpu_lines:
            # position of degree sign
            pos_degree = line.find(u"\xb0")
            # position of +
            pos_plus = line.find(u"+")
            tnum = float(line[pos_plus + 1 : pos_degree])
            tunit = line[pos_degree + 1 : pos_degree + 2]
            if tnum > tmax:
                tmax = tnum

        return [tmax, tunit]
Example #35
 def delete_user_repo(self):
     """delete user's remote repository"""
     conf = self.configuration
     dst_repo_name = conf.get('buildbot-configs', 'dst_repo_name')
     if not dst_repo_name.endswith(self.bug):
         msg = "cowardly refusing to delete {0}".format(dst_repo_name)
         msg = "{0}, its name does not end with {1}".format(msg, self.bug)
         log.error(msg)
         raise BuildbotConfigsError(msg)
     cmd = ("hg.mozilla.org", "edit", dst_repo_name,  "delete", "YES")
     log.info('deleting {0}'.format(dst_repo_name))
     log.debug('running ssh {0}'.format(' '.join(cmd)))
     output = []
     try:
         for line in sh.ssh(cmd, _iter=True):
             out = line.strip()
             log.debug(out)
             output.append(out)
     except sh.ErrorReturnCode_1:
         log.debug('trying to delete a non existing repo... pass')
         pass
     except sh.ErrorReturnCode:
         msg = 'bad exit code executing {0}'.format(' '.join(cmd))
         log.error(msg)
         raise BuildbotConfigsError(msg)
Example #36
def task_sensors(dict_idip):
    dict_data = {}
    for cluster in dict_idip.keys():
        dict_data[cluster] = {}
        for uid in dict_idip[cluster].keys():
            ip = dict_idip[cluster][uid]
            dict_data[cluster][uid] = {"ip": ip}
            # fetch the uid-ip server's temperature
            report = ssh("-o", "ConnectTimeout=1", "-o", "ConnectionAttempts=1",
                         "user@" + ip, "sensors")
            temp = parseCpuTemperature(report)
            dict_data[cluster][uid]["temp"] = temp[0]

    # write current temperature data to mongo db

    # get the highest cpu temperature through parsing the output of 'sensors'
    # return is a list including 2 elems, [36.0, C] or [36.0, F]
    # C or F is the unit name of temperature
    def parseCpuTemperature(self, values):
        lines = values.split("\n")
        cpu_lines = [x for x in lines if x.find("(high") > -1]
        tunit = "C"
        tmax = -1
        for line in cpu_lines:
            # position of degree sign
            pos_degree = line.find(u"\xb0")
            # position of +
            pos_plus = line.find(u"+")
            tnum = float(line[pos_plus + 1:pos_degree])
            tunit = line[pos_degree + 1:pos_degree + 2]
            if tnum > tmax:
                tmax = tnum

        return [tmax, tunit]
Example #37
    def delete_node(self, node):
        app.logger.info("Deleting node for: " + str(self.client.id))

        node.active = False
        node.date_rm = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        db.session.add(node)

        ssh(
            "root@" + self.hypervisorIP, "qm stop " + str(node.vid) +
            " && qm destroy " + str(node.vid) + " --skiplock")
        ssh("root@" + self.hypervisorIP,
            "rm -rf /mnt/pve/virt/images/" + str(node.vid))

        db.session.commit()
        app.logger.info("Deleted node: " + str(node.id) + " " + node.type)

        return True
Example #38
def _synchronize_mappings(sync_mappings):
    for from_path, to_path in sync_mappings:

        if ssh_connection_uri:
            ssh(ssh_connection_uri, 'mkdir', '-p', to_path.as_posix())
            to_uri = ssh_connection_uri + ':' + to_path.as_posix()
        else:
            to_path.mkdir(parents=True, exist_ok=True)
            to_uri = to_path.as_posix()

        exclude_params = zip(len(excludes) * ['--exclude'], excludes)

        rsync('-rltv',
              from_path.as_posix() + '/',
              to_uri + '/',
              delete=True,
              *exclude_params)
Example #39
 def _qmgr(self, command):
     result = None
     try:
         result = ssh("{0}@{1}".format(self.user, self.host), command)
     except:
         raise RuntimeError(
             "can not execute qmgr via ssh {0}".format(command))
     return result
Example #40
 def _run_manage_py(self, command, *args, **kwargs):
     python = '{}/bin/python'.format(self.settings['PYTHON_ENV'])
     manage = '{}/manage.py'.format(self.settings['ENVIRONMENT_ROOT'])
     try:
         if settings.TEST_SERVER == 'localhost':
             with cd(self.settings['ENVIRONMENT_ROOT']):
                 sh.Command(python)(manage, command, _bg=kwargs.get('bg', False), *args)
         else:
             sh.ssh(settings.TEST_SERVER, '{ssh_command} {manage} {manage_command} {args}'.format(
                 ssh_command='cd {} && {}'.format(self.settings['ENVIRONMENT_ROOT'], python),
                 manage=manage,
                 manage_command=command,
                 args=' '.join(args)), _iter=True)
     except Exception as e:
         if hasattr(e, 'stderr'):
             print(e.stderr)
         raise
Example #41
 def _cmd(self, *args):
     cmd = []
     cmd.extend(['-p', self.gerrit_port,
                 "{0}@{1}".format(self.gerrit_user, self.gerrit_host),
                 'gerrit'])
     cmd.extend(args)
     log.debug(cmd)
     return ssh(*cmd)
Example #42
	def get_status(self, jobid):
		""".. function:: get_status(jobid)

		      Return the current status of the job referenced by the given jobid

		      :param jobid: id of job to check on host"""

		result = ssh(self.host, "checkjob", jobid)
		return result
Example #43
    def qinfo(self, refresh=True):
        '''
        returns qstat -Q -f in dict format
        
        :param refresh: refreshes the qinfo
        :type refresh: Boolean
        '''
        
        if self.pbs_qinfo_data is None or refresh:
            try:
                result = ssh("{0}@{1}".format(self.user, self.host), "qstat -Q -f")
            except:
                raise RuntimeError("can not execute pbs qstat via ssh")

            d = {}

            # sanitize block

            result = result.replace("\n\t", "")

            result = result.replace('resources_assigned.', 'resources_assigned_')
            result = result.replace('resources_default.', 'resources_default_')
            result = result.replace('resources_max.', 'resources_max_')


            for block in result.split("\n\n")[:-1]:
                block = [x.replace(" =", ":", 1) for x in block.split("\n")]
                block[0] = block[0].replace("Queue: ", "") + ":"
                queue = block[0][:-1]

                block = '\n'.join(block)

                block_yaml = yaml.safe_load(block)
                d[queue] = block_yaml[queue]

                d[queue]['queue'] = queue
                # end sanitize

                if 'state_count' in d[queue]:
                    values = [x.split(":") for x in d[queue]['state_count'].split(" ")]
                    d[queue]['state_count'] = {}
                    for value in values:
                        d[queue]['state_count'][value[0]] = value[1]

                if 'acl_hosts' in d[queue]:
                    # print d[queue]['acl_hosts']
                    d[queue]['acl_hosts'] = d[queue]['acl_hosts'].split("+")

            self.pbs_qinfo_data = d

        #pprint(self.qinfo_extract(self.cluster_queues, self.pbs_qinfo_data))
        
        if self.cluster_queues is None:
            return {self.host: self.pbs_qinfo_data}
        else:
            return self.qinfo_extract(self.cluster_queues, self.pbs_qinfo_data)
Example #44
    def open_port_forwarding(self):
        """
        Opens correct ports in remote for forwarding.
        :return:
        """

        base_command = "ssh %(user)s@%(server)s -f -N -L %(port)s:%(ip)s:%(port)s"

        # open stdin_port
        sh.ssh(base_command
               % {"user": self.user,
                  "server": self.server,
                  "port": self.kernel_data.get("stdin_port"),
                  "ip": self.kernel_data.get("ip")})

        # open control_port
        sh.ssh(base_command
               % {"user": self.user,
                  "server": self.server,
                  "port": self.kernel_data.get("control_port"),
                  "ip": self.kernel_data.get("ip")})

        # open hb_port
        sh.ssh(base_command
               % {"user": self.user,
                  "server": self.server,
                  "port": self.kernel_data.get("hb_port"),
                  "ip": self.kernel_data.get("ip")})

        # open shell_port
        sh.ssh(base_command
               % {"user": self.user,
                  "server": self.server,
                  "port": self.kernel_data.get("shell_port"),
                  "ip": self.kernel_data.get("ip")})

        # open iopub_port
        sh.ssh(base_command
               % {"user": self.user,
                  "server": self.server,
                  "port": self.kernel_data.get("iopub_port"),
                  "ip": self.kernel_data.get("ip")})
Example #45
def ssh_tunnel(nodes, passwd=None):
	""" Sets up an SSH tunnel to a given set of nodes

	Args:
		nodes (list): List of node IDs on which the image should be installed

	Note: It will prompt for ssh password.

		DO NOT EXPECT THIS FUNCTION TO EXIT.
	"""
	if passwd is not None:
		global __ssh_pass__
		__ssh_pass__ = passwd
	args = ["-nNxT4"]
	for node in nodes:
		args.append("-L")
		args.append("9{id:03d}:localhost:9{id:03d}".format(id=node))
	args.append("*****@*****.**")
	p = ssh(args, _out=__ssh_interact__, _out_bufsize=0, _tty_in=True)
	p.wait()
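A hedged usage sketch; the node IDs are placeholders and the call blocks for as long as the tunnel is open, so it is typically run from a dedicated terminal or process:

import getpass

# forward ports 9001-9003 to the corresponding nodes; the password collected
# here is fed to ssh by the interaction callback
ssh_tunnel([1, 2, 3], passwd=getpass.getpass("ssh password: "))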
Example #46
    def create_repo(self):
        """creates a buildbot-configs repo as
           a copy of https://hg.mozilla.org/build/buildbot-configs/
           It creates hg.m.o/users/<ldap>_mozilla.com/buildbot-configs-<bug>
           appending -<bug> because this script creates and destroys that
           repo without asking for user permission.

        """
        conf = self.configuration
        src_repo_name = conf.get('buildbot-configs', 'src_repo_name')
        dst_repo_name = conf.get('buildbot-configs', 'dst_repo_name')
        if not dst_repo_name.endswith(self.bug):
            msg = "cowardly refusing to clone {0}".format(dst_repo_name)
            msg = "{0}, its name does not end with {1}".format(msg, self.bug)
            log.error(msg)
            raise BuildbotConfigsError(msg)
        cmd = ("hg.mozilla.org", "clone", dst_repo_name,  src_repo_name)
        log.info('cloning {0} to {1}'.format(src_repo_name, dst_repo_name))
        log.debug('running ssh {0}'.format(' '.join(cmd)))
        sh.ssh(cmd)
Example #47
    def __call__(self, *args, **kwargs):
        self.process = ssh(
                            '-o UserKnownHostsFile=/dev/null',
                            '-o StrictHostKeyChecking=no',
                            '-o LogLevel=quiet',
                            '{0}@{1}'.format(self.user, self.host),
                            '-p', self.port,
                            'LANG=C', *args,
                            _out=self.out_iteract, _out_bufsize=0, _tty_in=True,
                            **kwargs)

        super(SSHController, self).__call__(*args, **kwargs)
Example #48
    def get_ipmi_temperature(self, hostname):

        (hostaddr, data) = self.inventory.ipadr_cluster(hostname, "bmc")
        clustername = data["cm_cluster"]
        config = self.config_server[clustername]['bmc']
        # username and password to access ipmitool
        password = config['password']
        username = config['user']

        command = "ipmitool -I lanplus -H {0} -U {1} -P {2} sdr type temperature".format(
            hostaddr, username, password)
        # access ipmitool need a proxy server
        proxyaddr = config['proxy']['ip']
        proxyusername = config['proxy']['user']

        log.debug("Get temperature for host '{2}' via proxy server '{0}@{1}'".format(
            proxyusername, proxyaddr, hostname))

        try:
            result = ssh("{0}@{1}".format(proxyusername, proxyaddr), command)
        except:
            result = ""

        dict_result = None
        if result == "":
            log.warning(
                "Cannot access to host '{0}' OR ipmitool failed on host '{0}'".format(hostname))
        else:
            log.debug(
                "Temperature data retrieved from host '{0}' successfully.".format(hostname))
            dict_result = {}
            lines = result.split("\n")
            for line in lines:
                fields = map(lambda x: x.strip(), line.split("|"))
                name = fields[0]
                # test and ignore the blank line in the last output
                if name == "":
                    continue
                value = "-1"
                unit = "C"
                m = self.patt.match(fields[4])
                if m:
                    value = m.group(1)
                    unit = m.group(2)
                dict_result[name] = {"name": fields[0],
                                     "address": fields[1],
                                     "status": fields[2],
                                     "entity": fields[3],
                                     "value": value,
                                     "unit": unit
                                     }
        return dict_result
Example #49
    def create_repo(self):
        """creates a reposiory as
           a copy of https://hg.mozilla.org/build/self.name/
           It creates hg.m.o/users/<ldap>_mozilla.com/self.name-<bug>
           appending -<bug> because this script creates and destroys that
           repo without asking for user permission.

        """
        conf = self.configuration
        src_repo_name = conf.get(self.name, 'src_repo_name')
        dst_repo_name = conf.get(self.name, 'dst_repo_name')
#        if not dst_repo_name.endswith(self.bug):
#            msg = "cowardly refusing to clone {0}".format(dst_repo_name)
#            msg = "{0}, its name does not end with {1}".format(msg, self.bug)
#            log.error(msg)
#            raise RepositoryError(msg)
        # added more robust check on pushing rather than cloning...

        cmd = ('hg.mozilla.org', 'clone', dst_repo_name,  src_repo_name)
        log.info('cloning {0} to {1}'.format(src_repo_name, dst_repo_name))
        log.debug('running ssh {0}'.format(' '.join(cmd)))
        ssh(cmd)
Example #50
 def _exec_ssh_cmd(self, cmd, ignore_exit_code_1=False):
     try:
         for line in ssh(cmd, _iter=True):
             log.debug(line.strip())
     except ErrorReturnCode_1:
         if ignore_exit_code_1:
             log.debug('ignoring exit code = 1')
         else:
             msg = 'error executing {0}'.format(' '.join(cmd))
             log.debug(msg)
             raise RepositoryError(msg)
     except ErrorReturnCode:
         msg = 'error executing {0}'.format(' '.join(cmd))
         log.debug(msg)
         raise RepositoryError(msg)
Example #51
	def submit(self, scriptPath):
		""".. function:: submit(scriptPath):

		      Submits and runs a given local script with given parameters on cluster
			
		      :param scriptPath: path of script on local machine"""
		
		#transfer script file to remote host
		scpHost = self.host + ":~"
		scp(scriptPath, scpHost)

		result = ssh(self.host, "qsub", script)
		#Return an id...
		return result
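A minimal usage sketch, assuming cluster is an instance of the class above configured with a reachable host:

# copies the script to the remote home directory and submits it with qsub
job_id = cluster.submit("./jobs/run_simulation.sh")
print(job_id)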
Example #52
def start(args=None, opt=None):
    # start Consul key/value store for service discovery
    # on(MASTER).sudo(fmt("sh -c 'rm -rf /scratch/consul; nohup /homes/sys/bholt/bin/consul agent -server -bootstrap -data-dir /scratch/consul -node=master -bind=#{CONSUL} -client #{CONSUL} >#{CONSUL_LOG} 2>&1 &'"))
    cmd = fmt("--name=consul -d --net=host -p 8400:8400 -p 8500:8500 -p 8600:53/udp progrium/consul -server -bootstrap -node=master -bind=#{CONSUL} -client #{CONSUL}")
    on(MASTER).docker.run(cmd.split())

    time.sleep(4)
    
    for ssh in machines:
        
        # create new bridge network so our docker can run alongside the normal one
        if ssh.ip.link.show(BRIDGE, _ok_code=[0,1]).exit_code == 1:
            ssh.sudo.brctl.addbr(BRIDGE)
            ssh.sudo.ip.addr.add('10.20.30.1/24', 'dev', BRIDGE)
            ssh.sudo.ip.link.set('dev', 'swarm', 'up')
        
        # start docker daemon on remote host, headless via 'nohup', output to logfile
        ssh("sudo sh -c 'nohup docker daemon -H tcp://0.0.0.0:{dp} --exec-root=/var/run/docker.swarm --graph=/var/lib/docker.swarm --pidfile=/var/run/docker.swarm.pid --bridge={b} --cluster-advertise=ens1:{dp} --cluster-store=consul://{c}:{cp} >{log} 2>&1 &'".format(dp=DOCKER_PORT, c=CONSUL, cp=CONSUL_PORT, log="/var/log/docker.swarm", b=BRIDGE))
    
    time.sleep(1)
    # start Swarm manager
    nodelist = ','.join(["{}:{}".format(h, DOCKER_PORT) for h in hosts])
    docker(MASTER).run("--name=swarm", "-d", "--publish={}:2375".format(SWARM_PORT), "swarm:1.1.0", "--debug", "manage", "nodes://{}".format(nodelist))
Example #53
 def _qmgr(self, command):
     '''
     simply executes the command on a remote host. The host is globally set.
     
     TODO: there is a bug here as qmgr is not properly called
     
     :param command: the command to be executed
     :type command: String
     '''
     result = None
     try:
         result = ssh("{0}@{1}".format(self.user, self.host), command)
     except:
         raise RuntimeError("can not execute qmgr via ssh {0}".format(command))
     return result
Example #54
def get_default_modules(user,host):
    list = {}
    print host
    result = sort(ssh("{0}@{1}".format(user,host), "module", "-l", "avail", "2>&1"))
    print "ok"
    
    for line in result:
        if "default" in line:
            content = line.split()
            print content
            try:
                (module_package, module_version) = content[0].split("/")
                list[module_package] = module_version
            except:
                pass
    return list
Example #55
def ssh_handshake(target):
    """
    Make sure the remote host gets added to ~/.ssh/known_hosts in a sane manner.

    Make sure we are not asked for a password when doing an SSH connection to the remote.

    :param target: SSH spec. May include a directory.
    """
    from sh import ssh

    # http://amoffat.github.com/sh/tutorials/2-interacting_with_processes.html

    def callback(char, stdin):
        """
        Handle SSH input
        """

        _unbuffered_stdout.write(char.encode())

        # Ugh http://stackoverflow.com/a/4852073/315168
        callback.aggregated += char

        if "(yes/no)? " in callback.aggregated:
            stdin.put("yes\n")
            return True

        # Clear line
        if char == "\n":
            callback.aggregated = ""

    callback.aggregated = ""

    parts = target.split(":")
    host = parts[0]

    print "Doing SSH handshake and no password login verification to %s. If the process hangs here check you have working SSH_AGENT connection (e. g. not stale one from screen)." % host

    # Run a dummy ssh command, so we see we get public key auth working
    # If the server prompts for known_hosts update auto-yes it
    p = ssh("-o", "PreferredAuthentications=publickey", host, "touch ~/plonetool.handshake",
        _out=callback,
        _err=callback,
        _out_bufsize=0,
        _tty_in=True)
    p.wait()
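A short usage sketch; the host and path are placeholders. The callback auto-answers the known_hosts yes/no prompt, so this is typically run once before unattended deployments:

ssh_handshake("plone@site01.example.com:/srv/plone")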
Example #56
	def get_status(self, jobid):
		""".. function:: get_status(jobid)

		      Return the current status of the job referenced by the given jobid

		      :param jobid: id of job to check on host or job name within MongoDB"""

		if Job.objects.with_id(jobid):
			job = Job.objects.with_id(jobid)
			result = "\nJob: " + job.name 
			+ "\nAuthor: " + job.author
			+ "\n\nNodes: " + job.nodes
			+ "\nPPN: " + job.ppn 
			+ "\nWalltime: " + job.walltime 
			+ "\nQueue name: " + job.queuename
		else:
			result = ssh(self.host, "checkjob", jobid)
		
		return result
Example #57
    def pbsnodes(self, refresh=True):
        '''
        returns the pbs nodes info; pbs_nodes_raw_data is a string

        :param refresh: refreshes the data if set to True
        :type refresh: Boolean
        '''

        if self.pbs_nodes_data is None or refresh:
            try:
                result = ssh("{0}@{1}".format(self.user, self.host), "pbsnodes", "-a")
            except:
                raise RuntimeError("can not execute pbs nodes via ssh")
            pbsinfo = {}
            nodes = result.split("\n\n")
            for node in nodes:
                pbs_data = node.split("\n")
                pbs_data = [e.strip()  for e in pbs_data]
                name = pbs_data[0]
                if name != "":
                    pbsinfo[name] = {u'name': name}
                    for element in pbs_data[1:]:
                        try:
                            (attribute, value) = element.split (" = ")
                            if attribute == 'status':
                                status_elements = value.split(",")
                                pbsinfo[name][attribute] = {}
                                for e in status_elements:
                                    (a, v) = e.split("=")
                                    pbsinfo[name][attribute][a] = v
                            elif attribute == 'jobs':
                                pbsinfo[name][attribute] = value.split(',')
                            elif attribute == 'note' and (value.strip().startswith("{") or value.strip().startswith("[")):
                                pbsinfo[name][attribute] = literal_eval(value)
                            else:
                                pbsinfo[name][attribute] = value
                        except:
                            pass
            self.pbs_nodes_data = pbsinfo

        return self.pbs_nodes_data
Example #58
    def qinfo_user(self, refresh=True):
        """Return the number of user from qstat
        
            example line is:
            873664.i136 ...549.sh xcguser  0   Q long
            $3 indicates an user id
        """
        if self.pbs_qinfo_default_data is None or refresh:
            try:
                result = ssh("{0}@{1}".format(self.user, self.host), "qstat")
            except:
                raise RuntimeError("can not execute pbs qstat via ssh")

            # sanitize block
            data = self.convert_into_json(result)
            merged_data = self.merge_queues(self.cluster_queues, data)
            self.pbs_qinfo_default_data = merged_data
        else:
            merged_data = self.pbs_qinfo_default_data

        return merged_data
Example #59
    def extract(self, conf):
        merge = conf.get("should_line_merge", False)
        before = conf.get("break_only_before", None)
        if merge:
            before = re.compile(before)
        
        value = HashBuilder({
                "type": conf.get("type", "log"),
                "source_path": conf["path"],
                "tags": conf.get("tags", [])
            })

        if conf.get("host"):
            host = "%s@%s" % (conf["user"], conf["host"]) if conf.get("user") else conf["host"]
            remote_server = ssh.bake(host)
            mac = str(ssh(host, "ifconfig | grep 'eth0'"))
            value.shared["source_host"] = host
            value.shared["source_mac"] = re.findall("..:..:..:..:..:..", mac)[0]
            log = remote_server.tail("-f", conf["path"], _iter=True)
        else:
            touch(conf["path"])
            if conf.get("from", None) == "now":
                filesize = "-c0"
            else:
                filesize = "-c%s" % (int(stat("-nf" ,'%z', conf["path"])) * 2 + 10)
            log = tail("-F", filesize, conf["path"], _iter=True)

        for n, line in enumerate(log):
            if merge:
                if before.match(line):
                    if value.has_data():
                        yield value.dict()
                        # time.sleep(0.1)
                    value.build()
                value.log(line)
            else:
                value.build(line)
                yield value.dict()