Example #1
    def testSftpService(self):
        users = ({
            'email': '*****@*****.**',
            'login': '******',
            'firstName': 'First',
            'lastName': 'Last',
            'password': '******'
        }, {
            'email': '*****@*****.**',
            'login': '******',
            'firstName': 'First',
            'lastName': 'Last',
            'password': '******'
        })

        admin, user = [self.model('user').createUser(**user) for user in users]

        collections = ({
            'name': 'public collection',
            'public': True,
            'creator': admin
        }, {
            'name': 'private collection',
            'public': False,
            'creator': admin
        })

        privateFolder = self.model('folder').findOne({
            'parentCollection': 'user',
            'parentId': user['_id'],
            'name': 'Private'
        })
        self.assertIsNotNone(privateFolder)

        self.model('upload').uploadFromFile(six.BytesIO(b'hello world'),
                                            size=11,
                                            name='test.txt',
                                            parentType='folder',
                                            parent=privateFolder,
                                            user=user)

        for coll in collections:
            self.model('collection').createCollection(**coll)

        client = paramiko.SSHClient()
        client.load_system_host_keys()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        # Incorrect password should raise authentication error
        with self.assertRaises(paramiko.AuthenticationException):
            client.connect('localhost',
                           TEST_PORT,
                           username='******',
                           password='******',
                           look_for_keys=False,
                           allow_agent=False)

        # Authenticate as admin
        client.connect('localhost',
                       TEST_PORT,
                       username='******',
                       password='******',
                       look_for_keys=False,
                       allow_agent=False)
        sftpClient = client.open_sftp()
        self.assertEqual(sftpClient.listdir('/'), ['collection', 'user'])

        # Listing an invalid top level entity should fail
        with self.assertRaises(IOError):
            sftpClient.listdir('/foo')

        # Test listing of users, collections, and subfolders
        self.assertEqual(set(sftpClient.listdir('/user/')),
                         {'admin', 'regularuser'})
        self.assertEqual(set(sftpClient.listdir('/user/admin')),
                         {'Public', 'Private'})
        self.assertEqual(set(sftpClient.listdir('/collection')),
                         {'public collection', 'private collection'})

        self.assertEqual(sftpClient.listdir('/user/regularuser/Private'),
                         ['test.txt'])
        self.assertEqual(
            sftpClient.listdir('/user/regularuser/Private/test.txt'),
            ['test.txt'])

        with six.assertRaisesRegex(self, IOError, 'No such file'):
            sftpClient.listdir('/user/nonexistent')

        with six.assertRaisesRegex(self, IOError, 'No such file'):
            sftpClient.file('/user/regularuser/Private')

        # Read a file using small enough buf size to require multiple chunks internally.
        file = sftpClient.file('/user/regularuser/Private/test.txt/test.txt',
                               'r',
                               bufsize=4)
        self.assertEqual(file.read(2), b'he')
        self.assertEqual(file.read(), b'llo world')

        # Make sure we enforce max buffer length
        tmp, sftp.MAX_BUF_LEN = sftp.MAX_BUF_LEN, 2
        file = sftpClient.file('/user/regularuser/Private/test.txt/test.txt',
                               'r',
                               bufsize=4)
        with self.assertRaises(IOError):
            file.read()
        sftp.MAX_BUF_LEN = tmp

        # Test stat capability
        info = sftpClient.stat('/user/regularuser/Private')
        self.assertTrue(stat.S_ISDIR(info.st_mode))
        self.assertFalse(stat.S_ISREG(info.st_mode))
        self.assertEqual(info.st_mode & 0o777, 0o777)

        # lstat should also work
        info = sftpClient.lstat('/user/regularuser/Private/test.txt/test.txt')
        self.assertFalse(stat.S_ISDIR(info.st_mode))
        self.assertTrue(stat.S_ISREG(info.st_mode))
        self.assertEqual(info.st_size, 11)
        self.assertEqual(info.st_mode & 0o777, 0o777)

        # File stat implementations should agree
        info = file.stat()
        self.assertFalse(stat.S_ISDIR(info.st_mode))
        self.assertTrue(stat.S_ISREG(info.st_mode))
        self.assertEqual(info.st_size, 11)
        self.assertEqual(info.st_mode & 0o777, 0o777)

        # Make sure we can stat the top-level entities
        for path in ('/', '/user', '/collection'):
            info = sftpClient.stat(path)
            self.assertTrue(stat.S_ISDIR(info.st_mode))
            self.assertFalse(stat.S_ISREG(info.st_mode))
            self.assertEqual(info.st_mode & 0o777, 0o777)

        sftpClient.close()
        client.close()

        # Test that any username other than anonymous will fail using auth_none.
        sock = socket.socket()
        sock.connect(('localhost', TEST_PORT))
        trans = paramiko.Transport(sock)
        trans.connect()
        with self.assertRaises(paramiko.ssh_exception.BadAuthenticationType):
            trans.auth_none('')
        trans.close()
        sock.close()

        sock = socket.socket()
        sock.connect(('localhost', TEST_PORT))
        trans = paramiko.Transport(sock)
        trans.connect()
        with self.assertRaises(paramiko.ssh_exception.BadAuthenticationType):
            trans.auth_none('eponymous')
        trans.close()
        sock.close()

        # Test that a connection can be opened for anonymous access using auth_none.
        sock = socket.socket()
        sock.connect(('localhost', TEST_PORT))
        trans = paramiko.Transport(sock)
        trans.connect()
        trans.auth_none(username='******')
        sftpClient = paramiko.SFTPClient.from_transport(trans)

        # Only public data should be visible
        self.assertEqual(set(sftpClient.listdir('/user')),
                         {'admin', 'regularuser'})
        self.assertEqual(sftpClient.listdir('/collection'),
                         ['public collection'])
        self.assertEqual(sftpClient.listdir('/user/admin'), ['Public'])

        # Make sure the client cannot distinguish between a resource that does not exist
        # vs. one they simply don't have read access to.
        with six.assertRaisesRegex(self, IOError, 'No such file'):
            sftpClient.listdir('/user/regularuser/Private')

        with six.assertRaisesRegex(self, IOError, 'No such file'):
            sftpClient.file('/user/regularuser/Private/test.txt/test.txt', 'r')

        sftpClient.close()
        trans.close()
        sock.close()

        # Test anonymous access
        client = paramiko.SSHClient()
        client.load_system_host_keys()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        client.connect('localhost',
                       TEST_PORT,
                       username='******',
                       password='',
                       look_for_keys=False,
                       allow_agent=False)
        sftpClient = client.open_sftp()

        # Only public data should be visible
        self.assertEqual(set(sftpClient.listdir('/user')),
                         {'admin', 'regularuser'})
        self.assertEqual(sftpClient.listdir('/collection'),
                         ['public collection'])
        self.assertEqual(sftpClient.listdir('/user/admin'), ['Public'])

        # Make sure the client cannot distinguish between a resource that does not exist
        # vs. one they simply don't have read access to.
        with six.assertRaisesRegex(self, IOError, 'No such file'):
            sftpClient.listdir('/user/regularuser/Private')

        with six.assertRaisesRegex(self, IOError, 'No such file'):
            sftpClient.file('/user/regularuser/Private/test.txt/test.txt', 'r')

        sftpClient.close()
        client.close()
Example #2

def ssh_sftp_get(ip, user, password, remote_file, local_file, port=22):
    ssh = paramiko.SSHClient()  # create the SSH client
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())  # auto-accept unknown host keys
    ssh.connect(ip, port, user, password)  # connect to the server
    sftp = ssh.open_sftp()  # open an SFTP session
    sftp.get(remote_file, local_file)  # download the remote file to the local path
    sftp.close()
    ssh.close()
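
A minimal usage sketch for the helper above; the host, credentials, and file paths are placeholders rather than values from the original snippet (it assumes import paramiko at module level):

# Hypothetical call: download a remote log file into the current directory.
ssh_sftp_get('192.0.2.10', 'deploy', 'secret',
             '/var/log/syslog', './syslog', port=22)
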
Example #3

def read_home_dir(lines):
    for l in lines:
        print("   [-] " + l.strip())


if __name__ == "__main__":

    host = input("IP to enumerate:\n> ")
    port = input("Port SSH is running on:\n> ")
    username = input("Username:\n> ")
    password = input("Password:\n> ")

    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(host, port, username, password)

    cmds = [
        "whoami", "ip addr", "ip neighbors", "ip route", "arp -a",
        "cat /etc/passwd", "cat /etc/hosts", "ls -la /home/" + username
    ]

    for cmd in cmds:
        stdin, stdout, stderr = ssh.exec_command(cmd)
        print("[+] " + cmd)
        lines = stdout.readlines()
        if cmd == "hostname":
            hostname(lines)
        elif cmd == "whoami":
            whoami(lines)
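
The whoami() and hostname() helpers referenced in the loop above are not part of this excerpt; a hypothetical sketch, assuming they simply print the command output the same way read_home_dir() does:

def whoami(lines):
    # Hypothetical helper: print each output line of the whoami command.
    for l in lines:
        print("   [-] " + l.strip())


def hostname(lines):
    # Hypothetical helper: print each output line of the hostname command.
    for l in lines:
        print("   [-] " + l.strip())
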
Example #4
def dump(host, port, user, password):
    ldid_path = '.tmp/ldid'
    try:
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(
            host,
            port=port,
            username=user,
            password=password,
            look_for_keys=False,
            allow_agent=False)
    except ConnectionError:
        raise
    else:
        bad_string = b"dd: failed to open '/dev/disk1': Operation not permitted\n"
        shsh_dump_path = '/tmp/shsh_dump.bin'

        ssh.invoke_shell()
        sftp_client = ssh.open_sftp()

        dd_in, dd_out, dd_err = ssh.exec_command(
            'dd if=/dev/disk1 of={}'.format(shsh_dump_path))

        if dd_err.read() == bad_string:
            print('dd failed to dump, applying entitlements!')

            # NOTE: This is very important. Dropbear doesn't seem to work with
            # paramiko, so we absolutely need an SSH server that does.
            # Thankfully, having OpenSSH installed fixes this issue.

            print('Downloading dd!')
            sftp_client.get('/bin/dd', '.tmp/dd')

            print('Applying entitlements from ent.plist to dd!')
            subprocess.run((ldid_path, '-Sent.plist', '.tmp/dd'))

            print('Uploading modified dd!')
            sftp_client.put('.tmp/dd', '/bin/dd-dump')

            ssh.exec_command('chmod +x /bin/dd-dump')

            dd_dump_in, dd_dump_out, dd_dump_err = ssh.exec_command(
                'dd-dump if=/dev/disk1 of={}'.format(shsh_dump_path))

        try:
            sftp_client.get(shsh_dump_path, '.tmp/shsh_dump.bin')
        except FileNotFoundError:
            print('Somehow {} does not exist!'.format(shsh_dump_path))
            raise
        else:
            sftp_client.close()
            ssh.close()

            # Ensure that '.tmp/shsh_dump.bin' was actually downloaded;
            # os.path.exists() returns a boolean rather than raising.
            if os.path.exists('.tmp/shsh_dump.bin'):
                print('Seems like {} exists!'.format('.tmp/shsh_dump.bin'))
            else:
                print('Wut? Somehow the dump does not exist!')
Example #5
 def __init__(self, **kwargs):
     self.client = paramiko.SSHClient()
     self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
     self.kwargs = kwargs
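
The rest of this class is not shown; a hypothetical sketch of how the stored kwargs might be used later (the connect method name is an assumption, not part of the original):

 def connect(self):
     # Assumed helper: forward the stored keyword arguments to paramiko's connect().
     self.client.connect(**self.kwargs)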
Example #6
def PyHSTRecon(rAxisNew, sSliceNew, eSliceNew, ringFilterNew, pagFilterNew,
               ccdFilterNew, ccdFilterThresNew, ringFilterThresNew,
               pagFilterThresNew, pSize, OpenImage, RecMetodo, nSlices):

    sftpURL = '10.2.105.44'
    sftpUser = '******'
    sftpPass = '******'

    ssh = paramiko.SSHClient()
    # automatically add keys without requiring human intervention
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    ssh.connect(sftpURL, username=sftpUser, password=sftpPass)

    sftp_client = ssh.open_sftp()

    path = "cd /" + pvRLoc.get(as_string=True) + "/" + pvRUserName.get(
        as_string=True) + "/" + pvRExpPath.get(
            as_string=True) + "/" + pvReconPath.get(as_string=True) + "; ls"

    stdin, stdout, stderr = ssh.exec_command(path)

    x = []

    pvreconWait.put(1)

    while True:
        line = stdout.readline()
        line = line.replace('\n', '')

        if line != '':
            x.append(line)

        else:
            break

    path2 = "/" + pvRLoc.get(as_string=True) + "/" + pvRUserName.get(
        as_string=True) + "/" + pvRExpPath.get(
            as_string=True) + "/" + pvReconPath.get(as_string=True) + "/"

    if 'input.par' in x:

        path3 = "/" + pvRLoc.get(as_string=True) + "/" + pvRUserName.get(
            as_string=True) + "/" + pvRExpPath.get(
                as_string=True) + "/" + pvReconPath.get(
                    as_string=True) + "/input.par"
        path4 = "/" + pvRLoc.get(as_string=True) + "/" + pvRUserName.get(
            as_string=True) + "/" + pvRExpPath.get(
                as_string=True) + "/" + pvReconPath.get(
                    as_string=True) + "/arquivo.par"

        inputpar = sftp_client.open(path3, '+r')
        arquivopar = sftp_client.open(path4, '+w')

        try:
            words = [
                'ROTATION_AXIS_POSITION', 'START_VOXEL_3', 'END_VOXEL_3',
                'DO_RING_FILTER', 'DO_PAGANIN', 'DO_CCD_FILTER',
                'CCD_FILTER_PARA', 'RING_FILTER_PARA', 'PAGANIN_Lmicron',
                'PAGANIN_MARGE'
            ]

            for line in inputpar:

                if not any(word in line for word in words):
                    arquivopar.write(line)

                if words[0] in line:
                    if '#' not in line:
                        arquivopar.write(words[0] + ' = %0.2f \n' % rAxisNew)
                    else:
                        arquivopar.write(line)

                if words[1] in line:
                    if '#' not in line:
                        arquivopar.write(words[1] + ' = %d \n' % sSliceNew)
                    else:
                        arquivopar.write(line)

                if words[2] in line:
                    if '#' not in line:
                        arquivopar.write(words[2] + ' = %d \n' % eSliceNew)
                    else:
                        arquivopar.write(line)

                if words[3] in line:
                    if '#' not in line:
                        arquivopar.write(words[3] + ' = %d \n' % ringFilterNew)
                    else:
                        arquivopar.write(line)

                if words[4] in line:
                    if '#' not in line:
                        arquivopar.write(words[4] + ' = %d \n' % pagFilterNew)
                    else:
                        arquivopar.write(line)

                if words[5] in line:
                    if '#' not in line:
                        arquivopar.write(words[5] + ' = %d \n' % ccdFilterNew)
                    else:
                        arquivopar.write(line)

                if words[6] in line:
                    if '#' not in line:
                        arquivopar.write(words[6] +
                                         ' = {"threshold": %0.4f }\n' %
                                         ccdFilterThresNew)
                    else:
                        arquivopar.write(line)

                if words[7] in line:
                    if '#RING_FILTER_PARA' not in line:
                        arquivopar.write(
                            words[7] +
                            ' = {"FILTER": ar, "threshold":%0.4f }# {"FILTER": ar }\n'
                            % ringFilterThresNew)
                    else:
                        arquivopar.write(line)

                if words[8] in line:
                    if '#' not in line:
                        arquivopar.write(words[8] +
                                         ' = %0.2f \n' % pagFilterThresNew)
                    else:
                        arquivopar.write(line)

                if words[9] in line:
                    if '#' not in line:
                        pagFilterMARGEThresNew = int(pagFilterThresNew / pSize)
                        arquivopar.write(words[9] +
                                         ' = %d \n' % pagFilterMARGEThresNew)
                    else:
                        arquivopar.write(line)

        finally:
            inputpar.close()
            arquivopar.close()

            sftp_client.remove(path3)
            sftp_client.rename(path4, path3)

        NumberOfSlices = (int(eSliceNew) - int(sSliceNew)) + 1

        msg = 'Recon Started using PyHST' + ' at ' + time.strftime(
            "%H:%M - %d-%m-%Y") + '\n' + 'With a total of ' + str(
                nSlices) + ' image(s)' + '\n' + 'Path: ' + str(
                    path2) + '\n' + 'WAIT!!!'
        error8.put(msg)
        print('\n', msg, '\n\n')

        recon_cmd = "cd /" + pvRLoc.get(
            as_string=True) + "/" + pvRUserName.get(
                as_string=True) + "/" + pvRExpPath.get(
                    as_string=True) + "/" + pvReconPath.get(
                        as_string=True
                    ) + "; time PyHST2_33beta2 input.par lnls112,0 > garbage"

        print(recon_cmd)
        stdin, stdout, stderr = ssh.exec_command(recon_cmd)
        print(stdout.readline())

    else:
        msg = 'ERROR!!! \nThere is no input.par on ' + str(path2)
        error8.put(msg)
        print('\n', msg, '\n\n')

        OpenImage = 0

    if (OpenImage == 1):
        os.system('Scripts_Recon/Open_Image.py tomo.vol &')

    ssh.close()

    pvreconWait.put(0)
Example #7
    def get_logs(self, dest_dir=None):
        """Get the log files from this VM instance

        :param dest_dir: Optional destination directory/subdirectory
        :raise paramiko.SSHException: connection failed
        :return:
        """
        dest_dir = self._get_log_dest_dir(dest_dir)

        if os.path.exists(dest_dir):
            self.logger.info("Deleting existing log dir: \"%s\" ...", dest_dir)
            cmd_delete_log_dir = "rm -rf %s" % dest_dir
            os.system(cmd_delete_log_dir)
        self.logger.info("Creating IT log dir: \"%s\" ...", dest_dir)
        os.makedirs(dest_dir)

        # get the list of python processes running on the it_api_topology machine
        cmd_get_processes = "ps w | grep python"
        process_list = self.run_command(cmd_get_processes)
        if process_list is not None:
            self.logger.info("Processes on \"%s\":\n%s", self.name, "".join(process_list))
            ps_file = open(os.path.join(dest_dir, "ps-python.txt"), "w")
            for process in process_list:
                # Note - the items in the process_list already have carriage returns
                ps_file.write("%s" % process)
            ps_file.close()

        # get the rpd log files from the it_api_topology machine

        # there are some logs in /
        cmd_get_log_file_list_root = "ls -1 /*.log"
        log_file_list_root = self.run_command(cmd_get_log_file_list_root)

        # and there are some logs in /tmp
        cmd_get_log_file_list_tmp = "ls -1 /tmp/*.log"
        log_file_list_tmp = self.run_command(cmd_get_log_file_list_tmp)

        # concatenate the two lists into one; run_command may return None if a
        # command produced no output, so treat a missing list as empty
        log_file_list = (log_file_list_root or []) + (log_file_list_tmp or [])

        # If desired, add more files to the list
        # log_file_list.append("/etc/config/network")

        if log_file_list:

            ssh_client = paramiko.SSHClient()
            ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh_client.connect(self.ip_addresses[0],
                               username=SSH_INFO[0],
                               password=SSH_INFO[1],
                               timeout=SSH_INFO[2])

            # I cannot get the paramiko SFTPClient to work.
            # sftp = client.open_sftp()
            # Attempting to open the sftp client results in:
            # /usr/lib/python2.7/dist-packages/Crypto/Cipher/blockalgo.py:141:
            #   FutureWarning: CTR mode needs counter parameter, not IV
            #   self._cipher = factory.new(key, *args, **kwargs)
            #   SSHException: EOF during negotiation
            # sftp.get("/tmp/*.log", dest_dir)
            # sftp.close()

            scp_client = SCPClient(ssh_client.get_transport())
            for log_file in log_file_list:
                # Note - the items in the log_file_list have carriage returns that
                #  need to be removed
                log_file = log_file.rstrip("\n")
                # Note - the dest_file path should be the log_file path, but relative to dest_path
                dest_file = os.path.join(dest_dir, log_file.lstrip(os.path.sep))
                dest_dirname = os.path.dirname(dest_file)
                # make sure that the directory exists for the dest_file
                if not os.path.exists(dest_dirname):
                    os.makedirs(dest_dirname)
                self.logger.info("Fetching \"%s\" to \"%s\"", log_file, dest_file)
                scp_client.get(log_file, dest_file)

            if scp_client is not None:
                scp_client.close()

            if ssh_client is not None:
                ssh_client.close()
Example #8

def get_hypervisor_info(hostname,
                        verbose=False,
                        remote_user=None,
                        blindly_trust_host_keys=False):
    """Get domain information from a single libvirt hypervisor

    The results are returned as a HypervisorInfo object.

    Arguments:

    hostname: Hypervisor hostname from Nova.hypervisors.list(detailed=True)
    verbose: Whether to print messages about harmless actions, default: False
    remote_user: The user under which SSH tries to connect, default: None
    blindly_trust_host_keys: Allow MITM attacks, default: False

    This function connects to the given hypervisor over SSH and
    retrieves information about the libvirt domains known there.
    """

    h = HypervisorInfo(hostname)
    ssh = paramiko.SSHClient()
    if blindly_trust_host_keys:
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    try:
        ssh.connect(h.hostname, username=remote_user)
        _, stdout, stderr = ssh.exec_command("virsh list --uuid --all")
        for line in stdout:
            uuid = line.rstrip()
            if uuid == '':
                pass
            else:
                if verbose:
                    print("Found virsh instance {} on {}".format(
                        uuid, h.hostname))
                dom = LibvirtDomainInfo(uuid)
                h.add_domain(dom)
        for uuid, dom in h.domains.items():
            _, stdout, stderr = ssh.exec_command(
                "virsh dominfo {}".format(uuid))
            dominfo = re.compile(r"^([^:]*):\s*(.*)$")
            for line in stdout:
                if line == "\n":
                    # Ignore stupid trailing empty line
                    pass
                else:
                    m = dominfo.match(line)
                    if m:
                        dom.info[m.group(1).lower()] = m.group(2)
                    else:
                        h.errors.append(
                            u"Cannot understand line {} in virsh dominfo output"
                            .format(line))
    except paramiko.SSHException as e:
        h.errors.append(u"Error SSHing to {}:\n    {}".format(h.hostname, e))
    except:
        h.errors.append(u"Unexpected error SSHing to {}:\n    {}".format(
            h.hostname,
            sys.exc_info()[0]))
    ssh.close()
    return h
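
A minimal usage sketch; the hypervisor hostname and remote user below are placeholders, and HypervisorInfo is assumed to expose the hostname, domains, and errors attributes used above:

h = get_hypervisor_info('compute-01.example.org',
                        verbose=True,
                        remote_user='nova')
for uuid, dom in h.domains.items():
    # Keys of dom.info come from lower-cased "virsh dominfo" field names.
    print(uuid, dom.info.get('state'))
for err in h.errors:
    print(err)
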
Example #9
    def check(self, _):
        private_key = None

        if self.private_key_file is not None:
            try:
                if self.private_key_type == 'ecdsa':
                    private_key = paramiko.ECDSAKey.from_private_key_file(
                        self.private_key_file)
                else:
                    private_key = paramiko.RSAKey.from_private_key_file(
                        self.private_key_file)
            except IOError:
                self.warning("Unable to find private key file: %s",
                             self.private_key_file)
            except paramiko.ssh_exception.PasswordRequiredException:
                self.warning(
                    "Private key file is encrypted but no password was given")
            except paramiko.ssh_exception.SSHException:
                self.warning("Private key file is invalid")

        client = paramiko.SSHClient()
        if self.add_missing_keys:
            client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.load_system_host_keys()

        exception_message = "No errors occured"
        try:
            # Try to connect to check status of SSH
            try:
                client.connect(self.host,
                               port=self.port,
                               username=self.username,
                               password=self.password,
                               pkey=private_key)
                self.service_check(self.SSH_SERVICE_CHECK_NAME,
                                   AgentCheck.OK,
                                   tags=self.base_tags,
                                   message=exception_message)

            except Exception as e:
                exception_message = str(e)
                status = AgentCheck.CRITICAL
                self.service_check(self.SSH_SERVICE_CHECK_NAME,
                                   status,
                                   tags=self.base_tags,
                                   message=exception_message)
                if self.sftp_check:
                    self.service_check(self.SFTP_SERVICE_CHECK_NAME,
                                       status,
                                       tags=self.base_tags,
                                       message=exception_message)
                raise

            self._collect_metadata(client)

            # Open sftp session on the existing connection to check status of SFTP
            if self.sftp_check:
                try:
                    sftp = client.open_sftp()
                    # Check response time of SFTP
                    start_time = time.time()
                    sftp.listdir('.')
                    status = AgentCheck.OK
                    end_time = time.time()
                    time_taken = end_time - start_time
                    self.gauge('sftp.response_time',
                               time_taken,
                               tags=self.base_tags)

                except Exception as e:
                    exception_message = str(e)
                    status = AgentCheck.CRITICAL

                if exception_message is None:
                    exception_message = "No errors occured"

                self.service_check(self.SFTP_SERVICE_CHECK_NAME,
                                   status,
                                   tags=self.base_tags,
                                   message=exception_message)
        finally:
            # Always close the client, failure to do so leaks one thread per connection left open
            client.close()
Example #10

def main():
	print (">>>>>>>>>>>>>>> STEP1 - create ec2 instance if it doesn't already exit")
	# createKey()

	EC2 = boto3.resource('ec2')

	print("get instances")
	instances = EC2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])
	i = 0
	ec2_instance_id = "no_running_ec2_instances"
	ec2_instance = "no_running_ec2_instances"
	for instance in instances:
		print(instance.id, instance.instance_type)
		ec2_instance_id = instance.id
		ec2_instance = instance

	print("get running instance")
	print (ec2_instance_id)
	print (ec2_instance)

	if (ec2_instance_id == "no_running_ec2_instances"):
		print ("creating instance")
		ec2_instance = EC2.create_instances(ImageId='ami-00068cd7555f543d5', MinCount=1, MaxCount=1, KeyName="sft2020-x10-a", InstanceType='t3.micro')
		print ("new instance name : " + ec2_instance[0].id)
		print ("connect: " + ec2_instance_id)
	
	print(ec2_instance_id)
	print(ec2_instance)

	print (">>>>>>>>>>>>>>> STEP2 - Setup ssh")
	x = 1 
	# Setup ssh using paramiko
	ssh = paramiko.SSHClient()
	ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
	# for now we are using our key - but we can also create key (see routine to create key)
	privkey =  paramiko.RSAKey.from_private_key_file('../../../keys/sft2020-x10-a.pem')


	print (">>>>>>>>>>>>>>> STEP3 - Connect ec2 instance")
	try: 
		# now connect to ec2 instance
		print (ec2_instance)
		print ("connect " + ec2_instance.public_dns_name)
		ssh.connect(ec2_instance.public_dns_name,username='******',pkey=privkey)
		
		print (">>>>>>>>>>>>>>> STEP4 - clean environment")
		execute_script_in_ec2_generic(ssh, ec2_instance, "ucd1", "rm *.gz*")
		execute_script_in_ec2_generic(ssh, ec2_instance, "ucd1", "rm *.zip")
		
		print (">>>>>>>>>>>>>>> STEP5 - Deploy  test script")
		deploy_to_aws(ssh, ec2_instance, "ucd1", "dre1_test1", ".tar.gz")
		
		print (">>>>>>>>>>>>>>> STEP6 - Deploy  problem2 script")
		deploy_to_aws(ssh, ec2_instance, "ucd1", "problem2", ".tar.gz")
		
		print (">>>>>>>>>>>>>>> STEP7 - Deploy  EVENTS_10856_111111001 script")
		# https://ucd1.s3.amazonaws.com/EVENTS_10856_111111001.csv.zip
		deploy_to_aws(ssh, ec2_instance, "ucd1", "EVENTS_10856_111111001", ".csv.zip")

		# Setup environment for ec2 machine
		# sudo yum install python37
		# curl -O https://bootstrap.pypa.io/get-pip.py
		# python get-pip.py --user
		# python3 get-pip.py --user
		# pip install pyspark
		print (">>>>>>>>>>>>>>> STEP8 - execute test script")
		execute_script_in_ec2(ssh, ec2_instance, "ucd1",  "dre1_test1")
		print (">>>>>>>>>>>>>>> STEP9 - execute problem2 script")
		execute_script_in_ec2(ssh, ec2_instance, "ucd1",  "problem2")

		ssh.close()		
	except:
		print("----------------------------------------------------------------------------------")
		print("WARNING: If this is the first time - the aws may be initializing.  Try after 5 minutes")
		print("----------------------------------------------------------------------------------")
Example #11
Ec2_describe = sys.argv[1]
user = sys.argv[2]
path_httpd = sys.argv[3]
remote_httpd_path = '/tmp/httpd.conf'

try:
    # Parse the file to get the values we need for the SSH connection.
    with open(Ec2_describe, "r") as nodes:
        data = json.load(nodes)

        for x in data['Reservations'][0]['Instances']:

            ssh_client = paramiko.SSHClient()
            ssh_client.set_missing_host_key_policy(
                paramiko.AutoAddPolicy()
            )  #Policy for automatically adding the hostname and new host key to the local HostKeys object, and saving it
            ssh_client.connect(hostname=(x['PublicDnsName']),
                               username='******',
                               key_filename=(x['KeyName']) + ".pem")

            sftp_client = ssh_client.open_sftp()
            sftp_client.put(path_httpd, remote_httpd_path)
            sftp_client.close()

            stdin, stdout, stderr = ssh_client.exec_command(
                'sudo cp /etc/httpd/conf/httpd.conf /tmp/httpdBAK.conf')
            stdin, stdout, stderr = ssh_client.exec_command(
                'sudo mv /tmp/httpd.conf /etc/httpd/conf/httpd.conf')

            stdin, stdout, stderr = ssh_client.exec_command(
Example #12
username = config['cucm']['username']
password = config['cucm']['password']
router = config['routers']['ip']
domain = config['cucm']['domain']
cucmHostName = config['cucm']['hostname']

print('My configuration:')
print(f'CUCM IP: {ip}')
print(f'User: {username}')

host = ip

# Connect to CUCM via SSH
remote_conn_pre = paramiko.SSHClient()
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
remote_conn_pre.connect(ip,
                        username=username,
                        password=password,
                        look_for_keys=False,
                        allow_agent=False,
                        timeout=60)
print("SSH connection established to " + host)
remote_conn = remote_conn_pre.invoke_shell()
print("Interactive SSH session established")
time.sleep(15)

# Upload CFB Router certificate to CUCM server
with open(router + '.txt', 'r') as myfile:
    data = myfile.read()
remote_conn.send("\n")
Example #13

    def schedule_backup_process(self):
        conf_ids = self.search([])
        folder_path = ''

        for rec in conf_ids:
            db_list = self.get_db_list(rec.host, rec.port)

            if rec.name in db_list:
                try:
                    logger.info(
                        "Function: schedule_backup_process - Folder path is " +
                        str(rec.folder))
                    folder_path = rec.folder if rec.folder else '//db_backup'
                    logger.info(
                        "Function: schedule_backup_process - Folder path is " +
                        str(folder_path))

                    if not os.path.isdir(folder_path):
                        logger.info(
                            "Function: schedule_backup_process - Folder is "
                            "not directory, we will create the directory")
                        os.makedirs(folder_path)
                    else:
                        logger.info(
                            "Function: schedule_backup_process - Folder is directory"
                        )
                except Exception as e:
                    raise ValidationError(
                        'Function: schedule_backup_process - error is ' +
                        str(e))

                # Create name for dumpfile.
                bkp_file = '%s_%s.%s' % (time.strftime('%Y_%m_%d_%H_%M_%S'),
                                         rec.name, rec.backup_type)
                file_path = os.path.join(folder_path, bkp_file)
                uri = 'http://' + rec.host + ':' + rec.port
                conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
                bkp = ''

                logger.info(
                    'Function: schedule_backup_process - Parameters: bkp_file: '
                    '%s - file_path: %s - uri: %s - conn: %s - '
                    'bkp: %s' % (bkp_file, file_path, uri, conn, bkp))

                try:
                    # try to backup database and write it away
                    fp = open(file_path, 'wb')
                    odoo.service.db.dump_db(rec.name, fp, rec.backup_type)
                    fp.close()
                except Exception as error:
                    logger.debug(
                        "Couldn't backup database %s. Bad database administrator "
                        "password for server running at http://%s:%s" %
                        (rec.name, rec.host, rec.port))
                    logger.info(
                        "Function: schedule_backup_process - Parameters: Couldn't backup database %s. "
                        "Bad database administrator password for server running at http://%s:%s"
                        % (rec.name, rec.host, rec.port))
                    logger.debug("Exact error from the exception: " +
                                 str(error))
                    logger.info(
                        "Function: schedule_backup_process - Parameters: Exact error from the exception: "
                        + str(error))

                    continue

            else:
                logger.debug("database %s doesn't exist on http://%s:%s" %
                             (rec.name, rec.host, rec.port))
                logger.info("Function: schedule_backup_process - database %s "
                            "doesn't exist on http://%s:%s" %
                            (rec.name, rec.host, rec.port))

            # Check if user wants to write to SFTP or not.
            if rec.sftp_write is True:
                try:
                    # Store all values in variables
                    sftp = None
                    dir = folder_path
                    path_to_write_to = rec.sftp_path
                    ip_host = rec.sftp_host
                    port_host = rec.sftp_port
                    user_name_login = rec.sftp_user
                    password_login = rec.sftp_password

                    logger.debug('sftp remote path: %s' % path_to_write_to)

                    logger.info(
                        'Function: schedule_backup_process - Parameters: path_to_write_to: '
                        '%s - ip_host: %s - port_host: %s - user_name_login: %s - '
                        'password_login: %s' %
                        (path_to_write_to, ip_host, port_host, user_name_login,
                         password_login))

                    try:
                        s = paramiko.SSHClient()
                        s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                        s.connect(ip_host,
                                  port_host,
                                  user_name_login,
                                  password_login,
                                  timeout=20)
                        sftp = s.open_sftp()
                        logger.info(
                            'Function: schedule_backup_process - success connecting to remote'
                        )
                    except Exception as error:
                        logger.critical(
                            'Error connecting to remote server! Error: ' +
                            str(error))
                        logger.info(
                            'Function: schedule_backup_process - Error connecting to remote'
                            ' server! Error: ' + str(error))

                    try:
                        sftp.chdir(path_to_write_to)
                        logger.info(
                            'Function: schedule_backup_process - Parameters: '
                            'path_to_write_to: %s' % path_to_write_to)

                    except IOError:
                        # Create directory and subdirs if they do not exist.
                        current_dir = ''
                        for dirElement in path_to_write_to.split('/'):
                            current_dir += dirElement + '/'
                            logger.info(
                                'Function: schedule_backup_process - Parameters: '
                                'current_dir: %s' % current_dir)
                            try:
                                sftp.chdir(current_dir)
                            except:
                                logger.info(
                                    '(Part of the) path didn\'t exist. Creating it now at '
                                    + current_dir)

                                # Make directory and then navigate into it
                                sftp.mkdir(current_dir, mode=0o777)
                                sftp.chdir(current_dir)
                                pass
                    sftp.chdir(path_to_write_to)

                    logger.info(
                        'Function: schedule_backup_process - The new file is '
                        + str(rec.name))

                    # Loop over all files in the directory.
                    for f in os.listdir(dir):
                        logger.info(
                            'Function: schedule_backup_process - Start looping the '
                            'directory to upload the new file :' + str(f))
                        if rec.name in f:
                            logger.info(
                                'Function: schedule_backup_process - Start looping : directory'
                            )
                            fullpath = os.path.join(dir, f)
                            logger.info(
                                'Function: schedule_backup_process - Parameters: '
                                'fullpath: %s' % fullpath)
                            if os.path.isfile(fullpath):
                                try:
                                    sftp.stat(os.path.join(
                                        path_to_write_to, f))
                                    logger.debug(
                                        'File %s already exists on the remote FTP Server ------ skipped'
                                        % fullpath)
                                # This means the file does not exist (remote) yet!
                                except IOError:
                                    try:
                                        # sftp.put(fullpath, path_to_write_to)
                                        sftp.put(
                                            fullpath,
                                            os.path.join(path_to_write_to, f))
                                        logger.info(
                                            'Copying File % s------ success' %
                                            fullpath)
                                    except Exception as err:
                                        logger.critical(
                                            'We couldn\'t write the file to the remote server. Error: '
                                            + str(err))
                                        logger.info(
                                            'Copying File % s------ failed' %
                                            fullpath)

                        # Navigate in to the correct folder.
                        sftp.chdir(path_to_write_to)

                        # Loop over all files in the directory from the back-ups.
                        # We will check the creation date of every back-up.
                        for file in sftp.listdir(path_to_write_to):
                            if rec.name in file:
                                # Get the full path
                                fullpath = os.path.join(path_to_write_to, file)

                                # Get the timestamp from the file on the external server
                                timestamp = sftp.stat(fullpath).st_atime
                                createtime = datetime.datetime.fromtimestamp(
                                    timestamp)
                                now = datetime.datetime.now()
                                delta = now - createtime

                                # If the file is older than days_to_keep_sftp (the number
                                # of days the user filled in on the Odoo form), it will be removed.
                                if delta.days >= rec.days_to_keep_sftp:
                                    # Only delete files, no directories! paramiko's SFTPClient
                                    # has no isfile(), so check the remote mode via the stat module.
                                    if stat.S_ISREG(sftp.stat(fullpath).st_mode) and (
                                            ".dump" in file or '.zip' in file):
                                        logger.info(
                                            "Delete too old file from SFTP servers: "
                                            + file)
                                        sftp.unlink(file)
                    # Close the SFTP session.
                    sftp.close()

                    try:
                        ir_mail_server = self.env['ir.mail_server'].search(
                            [('active', '=', 'true')], limit=1)
                        if ir_mail_server:
                            message = "Dear,\n\nThe backup for the server " + \
                                      rec.host + " (IP: " + rec.sftp_host + ") succeeded"
                            msg = ir_mail_server.build_email(
                                email_from=ir_mail_server.smtp_user,
                                email_to=[
                                    ir_mail_server.smtp_user,
                                    rec.email_to_notify
                                ],
                                subject='Daily Backup : Success',
                                body=message,
                            )
                            ir_mail_server.send_email(msg)
                            logger.info(
                                'Function: schedule_backup_process - email sent to inform the success.'
                            )
                    except Exception as e:
                        logger.info(
                            'Function: schedule_backup_process - email cannot sent to inform the success due problem '
                            + str(e))

                except Exception as e:
                    logger.debug(
                        'Exception! We could not back up to the FTP server..')
                    if rec.send_mail_sftp_fail:
                        try:
                            ir_mail_server = self.env['ir.mail_server'].search(
                                [('active', '=', 'true')], limit=1)
                            if ir_mail_server:
                                message = "Dear,\n\nThe backup for the server " + \
                                          rec.host + " (IP: " + rec.sftp_host + ") failed"
                                msg = ir_mail_server.build_email(
                                    email_from=ir_mail_server.smtp_user,
                                    email_to=[
                                        ir_mail_server.smtp_user,
                                        rec.email_to_notify
                                    ],
                                    subject='Daily Backup : Failed',
                                    body=message,
                                )
                                ir_mail_server.send_email(msg)
                                logger.info(
                                    'Function: schedule_backup_process - Error due : '
                                    + str(e))
                        except Exception as e:
                            logger.info(
                                'Function: schedule_backup_process - email cannot sent to inform '
                                'the failed due problem ' + str(e))
            """
            Remove all old files (on local server) in case this is configured..
            """
            if rec.autoremove:
                dir = folder_path
                # Loop over all files in the directory.
                for f in os.listdir(dir):
                    fullpath = os.path.join(dir, f)
                    # Only delete the ones which are from the current database
                    # (makes it possible to save different databases in the same folder)
                    if rec.name in fullpath:
                        timestamp = os.stat(fullpath).st_ctime
                        createtime = datetime.datetime.fromtimestamp(timestamp)
                        now = datetime.datetime.now()
                        delta = now - createtime
                        if delta.days >= rec.days_to_keep:
                            # Only delete files (which are .dump and .zip), no directories.
                            if os.path.isfile(fullpath) and (".dump" in f
                                                             or '.zip' in f):
                                logger.info("Delete local out-of-date file: " +
                                            fullpath)
                                os.remove(fullpath)
Example #14

 def get_conn(self):
     conn = paramiko.SSHClient()
     conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
     conn.connect(hostname=self.SERVER_IP, username="******", password="******")
     self.conn = conn
Example #15

def createSSHClient(server, user, sshkey):
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(server, username=user, key_filename=sshkey)
    return client
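
A short usage sketch for the helper above; the server name, user, and key path are placeholders:

client = createSSHClient('backup.example.org', 'deploy', '/home/deploy/.ssh/id_rsa')
stdin, stdout, stderr = client.exec_command('uname -a')
print(stdout.read().decode())
client.close()
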
Example #16

def update_worker_json(name, entry):

    fd, tmp_json_path = tempfile.mkstemp()
    foreign_json = os.path.join(SUBMITTY_DATA_DIR, "autograding_TODO",
                                "autograding_worker.json")
    autograding_worker_to_ship = entry

    try:
        user = autograding_worker_to_ship[name]['username']
        host = autograding_worker_to_ship[name]['address']
    except Exception as e:
        print(
            "ERROR: autograding_workers.json entry for {0} is malformed. {1}"
            .format(name, e))
        grade_items_logging.log_message(
            JOB_ID,
            message=
            "ERROR: autograding_workers.json entry for {0} is malformed. {1}".
            format(name, e))
        grade_items_logging.log_stack_trace(job_id=JOB_ID,
                                            trace=traceback.format_exc())
        return False

    #create a new temporary json with only the entry for the current machine.
    with open(tmp_json_path, 'w') as outfile:
        json.dump(autograding_worker_to_ship,
                  outfile,
                  sort_keys=True,
                  indent=4)
    #if we are updating the current machine, we can just move the new json to the appropriate spot (no ssh needed)
    if host == "localhost":
        try:
            shutil.move(tmp_json_path, foreign_json)
            print(
                "Successfully updated local autograding_TODO/autograding_worker.json"
            )
            grade_items_logging.log_message(
                JOB_ID,
                message=
                "Successfully updated local autograding_TODO/autograding_worker.json"
            )
            return True
        except Exception as e:
            grade_items_logging.log_stack_trace(job_id=JOB_ID,
                                                trace=traceback.format_exc())
            grade_items_logging.log_message(
                JOB_ID,
                message=
                "ERROR: could not mv to local autograding_TODO/autograding_worker.json due to the following error: "
                + str(e))
            print(
                "ERROR: could not mv to local autograding_worker.json due to the following error: {0}"
                .format(e))
            return False
        finally:
            os.close(fd)
    #if we are updating a foreign machine, we must connect via ssh and use sftp to update it.
    else:
        #try to establish an ssh connection to the host
        try:
            ssh = paramiko.SSHClient()
            ssh.get_host_keys()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(hostname=host, username=user, timeout=5)
        except Exception as e:
            grade_items_logging.log_stack_trace(job_id=JOB_ID,
                                                trace=traceback.format_exc())
            grade_items_logging.log_message(
                JOB_ID,
                message=
                "ERROR: could not ssh to {0}@{1} due to following error: {2}".
                format(user, host, str(e)))
            print(
                "ERROR: could not ssh to {0}@{1} due to following error: {2}".
                format(user, host, str(e)))
            return False
        #try to copy the files over to the host
        try:
            sftp = ssh.open_sftp()

            sftp.put(tmp_json_path, foreign_json)

            sftp.close()
            print(
                "Successfully forwarded autograding_worker.json to {0}".format(
                    name))
            grade_items_logging.log_message(
                JOB_ID,
                message="Successfully forwarded autograding_worker.json to {0}"
                .format(name))
            success = True
        except Exception as e:
            grade_items_logging.log_stack_trace(job_id=JOB_ID,
                                                trace=traceback.format_exc())
            grade_items_logging.log_message(
                JOB_ID,
                message=
                "ERROR: could not sftp to foreign autograding_TODO/autograding_worker.json due to the following error: "
                + str(e))
            print(
                "ERROR: could not sftp to foreign autograding_TODO/autograding_worker.json due to the following error: {0}"
                .format(e))
            success = False
        finally:
            os.close(fd)
            os.remove(tmp_json_path)
            sftp.close()
            ssh.close()
            return success
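
A hypothetical invocation of the function above, with a made-up worker entry; the 'username' and 'address' fields follow the code, and any other autograding_workers.json fields are omitted:

entry = {
    'worker-01': {
        'username': 'submitty',
        'address': 'worker-01.example.org',
    }
}
if not update_worker_json('worker-01', entry):
    print('failed to update worker-01')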
Example #17
def PyRaftRecon(sSliceNew, eSliceNew, Shift, Centering, Regularization,
                Composition, Threshold, ImageSize, ImageFormat, BlocksNumber,
                GPUNumber, OpenImage, RecMetodo, nSlices):

    sftpURL = '10.2.105.44'
    sftpUser = '******'
    sftpPass = '******'

    ssh = paramiko.SSHClient()
    # automatically add keys without requiring human intervention
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    ssh.connect(sftpURL, username=sftpUser, password=sftpPass)

    sftp_client = ssh.open_sftp()

    path = "cd /" + pvRLoc.get(as_string=True) + "/" + pvRUserName.get(
        as_string=True) + "/" + pvRExpPath.get(
            as_string=True) + "/" + pvReconPath.get(as_string=True) + "; ls"

    stdin, stdout, stderr = ssh.exec_command(path)

    x = []

    pvreconWait.put(1)

    while True:
        line = stdout.readline()
        line = line.replace('\n', '')

        if line != '':
            x.append(line)

        else:
            break

    path2 = "/" + pvRLoc.get(as_string=True) + "/" + pvRUserName.get(
        as_string=True) + "/" + pvRExpPath.get(
            as_string=True) + "/" + pvReconPath.get(as_string=True) + "/"

    if 'input.par' in x:

        path3 = "/" + pvRLoc.get(as_string=True) + "/" + pvRUserName.get(
            as_string=True) + "/" + pvRExpPath.get(
                as_string=True) + "/" + pvReconPath.get(
                    as_string=True) + "/input.par"
        path4 = "/" + pvRLoc.get(as_string=True) + "/" + pvRUserName.get(
            as_string=True) + "/" + pvRExpPath.get(
                as_string=True) + "/" + pvReconPath.get(
                    as_string=True) + "/arquivo.par"

        inputpar = sftp_client.open(path3, '+r')
        arquivopar = sftp_client.open(path4, '+w')

        try:
            words = [
                '#START_SLICE', '#END_SLICE', '#ROTATION_SHIFT', '#CENTERING',
                '#THRESHOLD', '#REGULARIZATION', '#FILTER_COMPOSITION',
                '#NUMBER_OF_BLOCKS', '#IMAGE_FORMAT', '#IMAGE_SIZE',
                '#GPU_NUMBER'
            ]

            for line in inputpar:

                if not any(word in line for word in words):
                    arquivopar.write(line)

                if words[0] in line:
                    arquivopar.write(words[0] + ' = %d \n' % sSliceNew)

                if words[1] in line:
                    arquivopar.write(words[1] + ' = %d \n' % eSliceNew)

                if words[2] in line:
                    arquivopar.write(words[2] + ' = %f \n' % Shift)

                if words[3] in line:
                    arquivopar.write(words[3] + ' = %f \n' % Centering)

                if words[4] in line:
                    arquivopar.write(words[4] + ' = %d \n' % Threshold)

                if words[5] in line:
                    arquivopar.write(words[5] + ' = %f \n' % Regularization)

                if words[6] in line:
                    arquivopar.write(words[6] + ' = %d \n' % Composition)

                if words[7] in line:
                    arquivopar.write(words[7] + ' = %d \n' % BlocksNumber)

                if words[8] in line:
                    arquivopar.write(words[8] + ' = %d \n' % ImageFormat)

                if words[9] in line:
                    arquivopar.write(words[9] + ' = %d \n' % ImageSize)

                if words[10] in line:
                    arquivopar.write(words[10] + ' = %d \n' % GPUNumber)

        finally:
            inputpar.close()
            arquivopar.close()

            sftp_client.remove(path3)
            sftp_client.rename(path4, path3)

        msg = 'Recon Started using PyRaft' + ' at ' + time.strftime(
            "%H:%M - %d-%m-%Y") + '\n' + 'With a total of ' + str(
                nSlices) + ' image(s)' + '\n' + 'Path: ' + str(
                    path2) + '\n' + 'WAIT!!!'
        error8.put(msg)
        print('\n', msg, '\n\n')

        Path = "/" + pvRLoc.get(as_string=True) + "/" + pvRUserName.get(
            as_string=True) + "/" + pvRExpPath.get(
                as_string=True) + "/" + pvReconPath.get(as_string=True)

        ### C ###
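        # Pick the reconstruction block size from the number of slices requested.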
        BlockSize = 1
        if (1 < nSlices <= 20):
            BlockSize = 4
        elif (20 < nSlices < 60):
            BlockSize = 40
        elif (nSlices >= 60):
            BlockSize = 60

        Regularization = (Regularization / 100000)
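        # Build the raft_imx command line to run on the reconstruction machine
        # over SSH; the 360-degree case additionally passes the centering value
        # (-a) and a fixed -h 3 option.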

        if (pvaRange.get() == 360):
            cmd_recon = 'ssh 10.2.105.44 "/apps/IMX/ReconSoft/raft/bin/raft_imx -p ' + Path + '/ -i ' + str(
                sSliceNew - 1) + ' -f ' + str(eSliceNew) + ' -b ' + str(
                    BlockSize) + ' -r ' + str(Regularization) + ' -c ' + str(
                        Composition) + ' -m ' + str(Threshold) + ' -s ' + str(
                            ImageSize
                        ) + ' -n ' + str(ImageFormat) + ' -d ' + str(
                            BlocksNumber) + ' -g ' + str(
                                GPUNumber
                            ) + ' -o ' + str(Shift) + ' -a ' + str(
                                int(Centering)) + ' -h 3"'  # NEW RECON MACHINE
            #error8.put("360 Recon Not Working Yet...")
            #pvreconWait.put(0)
            #sys.exit()

        else:
            cmd_recon = 'ssh 10.2.105.44 "/apps/IMX/ReconSoft/raft/bin/raft_imx -p ' + Path + '/ -i ' + str(
                sSliceNew - 1) + ' -f ' + str(eSliceNew) + ' -b ' + str(
                    BlockSize) + ' -r ' + str(Regularization) + ' -c ' + str(
                        Composition) + ' -m ' + str(Threshold) + ' -s ' + str(
                            ImageSize) + ' -n ' + str(
                                ImageFormat) + ' -d ' + str(
                                    BlocksNumber) + ' -g ' + str(
                                        GPUNumber) + ' -o ' + str(
                                            Shift) + '"'  # NEW RECON MACHINE

        #cmd_recon = 'ssh 10.2.105.181 "/storage/raft/bin/raft_imx -p ' + Path + '/ -i ' + str(sSliceNew-1) + ' -f ' + str(eSliceNew) + ' -b ' + str(BlockSize) + ' -r ' + str(Regularization) + ' -c ' + str(Composition) + ' -m ' + str(ThresholdMAX) + '"' 				# OLD RECON MACHINE

        os.system(cmd_recon)

        print(cmd_recon)
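        # Assemble the reconstructed slices into a volume on the recon machine,
        # using the 8-bit or 32-bit variant of the volfromslice script depending
        # on ImageFormat.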

        if ImageFormat == 1:  #8Bits
            volfromslice = 'ssh 10.2.105.44 "python /apps/IMX/ReconSoft/imx_volfromslice.py ' + Path + ' ' + str(
                int(sSliceNew) -
                1) + ' ' + str(int(eSliceNew) - 1) + ' ' + str(
                    ImageSize) + '"'  # NEW RECON MACHINE
            #volfromslice = 'ssh 10.2.105.181 "python /storage/imx_volfromslice.py ' + Path + ' ' + str(sSliceNew-1) + ' ' + str(eSliceNew-1) + '"' 						# OLD RECON MACHINE

        if ImageFormat == 0:  #32Bits
            volfromslice = 'ssh 10.2.105.44 "python /apps/IMX/ReconSoft/imx_volfromslice32.py ' + Path + ' ' + str(
                int(sSliceNew) -
                1) + ' ' + str(int(eSliceNew) - 1) + ' ' + str(
                    ImageSize) + '"'  # NEW RECON MACHINE
            #volfromslice = 'ssh 10.2.105.181 "python /storage/imx_volfromslice.py ' + Path + ' ' + str(sSliceNew-1) + ' ' + str(eSliceNew-1) + '"' 						# OLD RECON MACHINE

        os.system(volfromslice)

        #cmd_remove = 'ssh 10.2.105.44 "rm ' + Path + '/recon/slice_*"'
        #os.system(cmd_remove)

        msg = 'Recon FINISHED' + ' at ' + time.strftime(
            "%H:%M - %d-%m-%Y") + '\n' + 'With a total of ' + str(
                nSlices) + ' image(s)' + '\n' + 'Path: ' + str(path2)
        error8.put(msg)
        print('\n', msg, '\n\n')

    else:
        msg = 'ERROR!!! \nThere is no input.par on ' + str(path2)
        error8.put(msg)
        print('\n', msg, '\n\n')

        OpenImage = 0

    if (OpenImage == 1 and ImageFormat == 1):  #8Bits
        os.system('Scripts_Recon/Open_Image.py tomo' + str(nSlices) + '.b &')

    if (OpenImage == 1 and ImageFormat == 0):  #32Bits
        os.system('Scripts_Recon/Open_Image.py tomo' + str(nSlices) +
                  '_32.b &')

    pvreconWait.put(0)
def prepare_job(my_name, which_machine, which_untrusted, next_directory,
                next_to_grade):
    # verify the DAEMON_USER is running this script
    if not int(os.getuid()) == int(DAEMON_UID):
        grade_items_logging.log_message(
            JOB_ID, message="ERROR: must be run by DAEMON_USER")
        raise SystemExit(
            "ERROR: the grade_item.py script must be run by the DAEMON_USER")

    if which_machine == 'localhost':
        address = which_machine
    else:
        address = which_machine.split('@')[1]

    # prepare the zip files
    try:
        autograding_zip_tmp, submission_zip_tmp = packer_unpacker.prepare_autograding_and_submission_zip(
            which_machine, which_untrusted, next_directory, next_to_grade)
        fully_qualified_domain_name = socket.getfqdn()
        servername_workername = "{0}_{1}".format(fully_qualified_domain_name,
                                                 address)
        autograding_zip = os.path.join(
            SUBMITTY_DATA_DIR, "autograding_TODO",
            servername_workername + "_" + which_untrusted + "_autograding.zip")
        submission_zip = os.path.join(
            SUBMITTY_DATA_DIR, "autograding_TODO",
            servername_workername + "_" + which_untrusted + "_submission.zip")
        todo_queue_file = os.path.join(
            SUBMITTY_DATA_DIR, "autograding_TODO",
            servername_workername + "_" + which_untrusted + "_queue.json")
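        # Load the queue entry and record which machine and untrusted user will
        # run the job, along with the ship time.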

        with open(next_to_grade, 'r') as infile:
            queue_obj = json.load(infile)
            queue_obj["which_untrusted"] = which_untrusted
            queue_obj["which_machine"] = which_machine
            queue_obj["ship_time"] = dateutils.write_submitty_date(
                microseconds=True)
    except Exception as e:
        grade_items_logging.log_stack_trace(job_id=JOB_ID,
                                            trace=traceback.format_exc())
        grade_items_logging.log_message(
            JOB_ID,
            message=
            "ERROR: failed preparing submission zip or accessing next to grade "
            + str(e))
        print(
            "ERROR: failed preparing submission zip or accessing next to grade ",
            e)
        return False

    if address == "localhost":
        try:
            shutil.move(autograding_zip_tmp, autograding_zip)
            shutil.move(submission_zip_tmp, submission_zip)
            with open(todo_queue_file, 'w') as outfile:
                json.dump(queue_obj, outfile, sort_keys=True, indent=4)
        except Exception as e:
            grade_items_logging.log_stack_trace(job_id=JOB_ID,
                                                trace=traceback.format_exc())
            grade_items_logging.log_message(
                JOB_ID,
                message=
                "ERROR: could not move files due to the following error: " +
                str(e))
            print(
                "ERROR: could not move files due to the following error: {0}".
                format(e))
            return False
    else:
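        # Remote worker: push the autograding/submission zips and the queue
        # file to the worker over SFTP.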
        try:
            user, host = which_machine.split("@")
            ssh = paramiko.SSHClient()
            ssh.get_host_keys()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

            ssh.connect(hostname=host, username=user, timeout=5)
            sftp = ssh.open_sftp()

            sftp.put(autograding_zip_tmp, autograding_zip)
            sftp.put(submission_zip_tmp, submission_zip)
            with open(todo_queue_file, 'w') as outfile:
                json.dump(queue_obj, outfile, sort_keys=True, indent=4)
            sftp.put(todo_queue_file, todo_queue_file)
            os.remove(todo_queue_file)
            print("Successfully forwarded files to {0}".format(my_name))
            success = True
        except Exception as e:
            grade_items_logging.log_stack_trace(job_id=JOB_ID,
                                                trace=traceback.format_exc())
            grade_items_logging.log_message(
                JOB_ID,
                message=
                "ERROR: could not move files due to the following error: " +
                str(e))
            print(
                "Could not move files due to the following error: {0}".format(
                    e))
            success = False
        finally:
            sftp.close()
            ssh.close()
            os.remove(autograding_zip_tmp)
            os.remove(submission_zip_tmp)
            return success

    # log completion of job preparation
    obj = packer_unpacker.load_queue_file_obj(JOB_ID, next_directory,
                                              next_to_grade)
    partial_path = os.path.join(obj["gradeable"], obj["who"],
                                str(obj["version"]))
    item_name = os.path.join(obj["semester"], obj["course"], "submissions",
                             partial_path)
    is_batch = "regrade" in obj and obj["regrade"]
    grade_items_logging.log_message(JOB_ID,
                                    jobname=item_name,
                                    which_untrusted=which_untrusted,
                                    is_batch=is_batch,
                                    message="Prepared job for " +
                                    which_machine)
    return True
Exemple #19
0
def backend(src,dst):
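	# Hop-by-hop walk from src towards dst: ssh to each hop, read "show version"
	# to identify the platform, follow "sh ip route <dst>" to the next hop, and
	# record entry/exit interfaces per router until the destination is reached.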
	arr=[]
	count=0

	setofnames=set()
	dictofnames={}
	dictofobj={}

	name=''
	s={src}
	now=src
	honame=set()
	exit=dict()
	entry=dict()
	entryrev=dict()
	ls=[]
	ls.append(now)
	extract=set()
	p=''
	boo=True
	intojson=[]

	while(len(s)>0):
	    now=ls[0]
	    boo=True

	    # Connect with paramiko first to read "show version" and work out the
	    # platform type before opening the interactive session.
	    rem=paramiko.SSHClient()
	    rem.set_missing_host_key_policy(paramiko.AutoAddPolicy())
	    boo=True
	    while boo:
	        try:
	            rem.connect(now,port=22, username='******',password='******')
	            boo=False
	            print("SSH connection established for getting the version - ",now)
	            stdin,stdout,stderr=rem.exec_command("show version")
	        except Exception as e:
	            print("Error in ssh connection, Trying again. Error - ",e)
	            boo=True

	    output=stdout.readlines()
	    print(output)
	    output='\n'.join(output)
	    k9=output.replace('\n',' ')
	    print("\n\n\n\n")
	    print(k9)
	    k9=k9.split()
	    print("\n\n\n\n")
	    print(k9)
	    a=k9.index('Cisco')
	    #print(a)
	    #print(k9[a+1])
	    ios_ver=''
	    verdict={}
	    if (k9[a+1]=='IOS'):
	        if (k9[a+2]=='XE'):
	            ios_ver='IOS_XE'
	        else:
	            ios_ver='IOS'
	    else:
	        ios_ver='NEXUS'

	    print(ios_ver)

	    verdict['soft_ver']=ios_ver

	    if ios_ver=='NEXUS':
	        var=k9.index('BIOS:')
	        var3=k9.index('kickstart:')
	        verdict['version']="BIOS: "+k9[var+2]+" Kickstart: "+k9[var3+2]
	        var=k9.index('uptime')
	        var2=k9.index('second(s)')
	        verdict['uptime']=' '.join(k9[var+2:var2+1])
	        var=k9.index('Hardware')
	        verdict['hardware']=' '.join(k9[var+1:var+3])
	        var=k9.index('Reason:')
	        verdict['reload_reason']=k9[var+1]

	    elif ios_ver=='IOS_XE':
	        var=k9.index('weeks,')
	        var2=k9.index('minutes')
	        verdict['uptime']=' '.join(k9[var-1:var2+1])

	        var=k9.index('Last')
	        var2=k9.index('This')
	        verdict['reload_reason']=' '.join(k9[var+3:var2])

	        var=k9.index('Version')
	        verdict['version']=k9[var+1]

	        var=k9.index('Release')
	        verdict['hardware']=k9[var+4]

	    else:
	        var=k9.index('Version')
	        # strip the trailing comma from the version token
	        verdict['version']=k9[var+1][:-1]
	        var=k9.index('Software,')
	        verdict['hardware']=k9[var+1]
	        var=k9.index('uptime')
	        var2=k9.index('minutes')
	        verdict['uptime']=' '.join(k9[var+2:var2+1])
	        var=k9.index('reason:')
	        var2=k9.index('This')
	        verdict['reload_reason']=' '.join(k9[var+1:var2])

	    print(verdict)

	    rem.close()

	    # Open the interactive session used for the rest of the discovery.
	    boo=True
	    while boo:
	        try:
	            ssh= ConnectHandler(device_type=ios_ver,host=now,username="******",password="******")
	            boo=False
	        except Exception as e:
	            boo=True
	            print(" Connection error, trying again ",e)






	    #ret=ssh.send_command("en")
	    boo=True
	    while boo:
	        try: 
	            name=ssh.find_prompt()
	            boo=False
	        except Exception as e:
	            print(str(e))
	            print("Trying again")
	            boo=True
	        if not(re.match("^[A-Z]{3}-{2}[A-Z]{2}[0-9]{2}#{1}$",name)):
	            boo=True
	            print(" Name Received Incorrect- Trying again "+name)
	    
	    name=name[:-1]


	    if name not in setofnames:
	        setofnames.add(name)
	        dictofnames[name]=count
	        k=router(name)
	        arr.append(k)
	        dictofobj[name]=k
	        dictofobj[name].addconnect(ssh)
	        dictofobj[name].addsship(now)
	        count+=1
	    dictofobj[name].gennodedict['version']=verdict	        
	    
	    print(name)
	    honame.add(name)
	    print("dict of names ")
	    print(dictofnames)
	    boo2=True
	    while boo2:
	        
	    
	        boo=True
	        while boo:
	            try:
	                ret=ssh.send_command("sh ip route "+dst+" | include Known via")
	                boo=False
	            except Exception as e:
	                boo=True
	                print("1 Exception Handled- Trying again")
	            print(" return from sh ip route | inc known via ")
	            print(ret)
	            if not ret:
	                print("1 Trying again")
	                boo=True
	            elif isinstance(ret,list):
	                print("1 Return from sh ip route is a list, trying again")
	                boo=True
	            elif len(ret.split())>=3:
	                boo=False
	            else:
	                print("1 Trying Again sh ip route")
	                boo=True
	        print(" Name "+name+" show ip route | i known via")
	        print(ret)
	        ret=ret.split()
	        prot=ret[2][1:]
	        print("PROT- "+prot)
	        if prot!='bgp' and prot !='connected",' and prot!='eigrp':
	            boo2=True
	            print(" Protocol received isn't correct. Trying Again ")
	        else:
	            boo2=False
	            


	    
	    if prot=='bgp':
	        dst1=dst
	        fl=0
	        print("Prot BGP")
	        while fl!=2:
	            boo=True
	            while boo:
	                try:
	                    ret=ssh.send_command("sh ip route "+dst1)
	                    boo=False
	                except:
	                    boo=True
	                    print("2 Exception Handled- Trying again")

	                if not ret:
	                    boo=True
	                    print("2 Trying again")
	                elif isinstance(ret,list):
	                    print("2 Return from sh ip route is a list, trying again")
	                    boo=True
	                elif len(ret.split())>3:
	                    boo=False
	                else:
	                    boo=True
	                    print("2 Trying again")
	            print("\tBGP- sh ip route for dst "+dst1)
	            print(ret)
	            ret=ret.split("\n")
	            fl=0
	            for i in ret:
	                i=i.split()
	                print("splitting ret")
	                print(i)
	                if i[0]=='*':
	                    nxt=i[1]
	                    if nxt=='directly':
	                        x=i.index('via')
	                        hop=i[x+1]
	                        fl=2
	                        break
	                    elif re.match('^(?:[0-9]{1,3}\.){3}([0-9]{1,3})',nxt):
	                        dst1=nxt
	                        if nxt[-1]==',':
	                            dst1=nxt[:-1]

	                        fl=1
	                        break
	        print("Name "+name+" BGP: next hop "+dst1+" exit interface "+hop)
	        extract.add(dst1)
	        s.add(dst1)
	        ls.append(dst1)
	        p=''
	        p=hop+' '+dst1
	        if name not in exit.keys():
	            exit[name]=set()
	        exit[name].add(p)
	        boo=True
	        while boo:
	            try:    
	                ret=ssh.send_command("sh ip int brief | include "+hop)
	                boo=False
	            except:
	                print("3 Exception Handled- Trying again")
	                boo=True
	            if not ret:
	                boo=True
	            elif isinstance(ret,list):
	                print("3 Return from sh ip int brief is a list, trying again")
	                boo=True
	            elif len(ret.split())<6:
	                boo=True
	            else:
	                boo=False
	                
	        ip=ret.split()[1]



	        ctobj=dictofnames[name]
	        arr[ctobj].addexit(p)
	        dictofobj[name].adddictip(hop,ip)
	        



	        p=''
	            
	    elif prot=='connected",':
	        boo=True
	        while boo:
	            try:
	                ret=ssh.send_command("sh ip route "+dst+" | include directly")
	                boo=False
	            except:
	                boo=True
	                print("4 Exception Handled- Trying again")
	            print(" Return from sh ip route dst i directly")
	            print(ret)
	            if not ret:
	                boo=True
	                print("4 Return from show ip route dst is null, Trying again")
	            
	            elif isinstance(ret,list):
	                print("4 Return from sh ip route directly is a list, trying again")
	                boo=True
	            elif len(ret.split())>3:
	                boo=False
	            else:
	                print("4 Trying again")
	                boo=True
	        
	        print("Connected route- show ip route| i directly ")
	        print(ret)
	        ret=ret.split()
	        p=''
	        x=ret.index('via')
	        p=ret[x+1]
	        hop=ret[x+1]
	        p=p+' directly'
	        if name not in exit.keys():
	                exit[name]=set()
	        exit[name].add(p)
	        print(" Name "+name+" is connected to dst via "+p)

	        ctobj=dictofnames[name]
	        arr[ctobj].addexit(p)
	        boo=True
	        while boo:
	            try:    
	                ret=ssh.send_command("sh ip int brief | include "+hop)
	                boo=False
	            except:
	                print("5 Exception Handled- Trying again")
	                boo=True
	            if not ret:
	                boo=True
	            elif isinstance(ret,list):
	                print("5 Return from sh ip int brief is a list, trying again")
	                boo=True
	            elif len(ret.split())<6:
	                boo=True
	            else:
	                boo=False
	            
	                
	        ip=ret.split()[1]

	        dictofobj[name].adddictip(hop,ip)

	        p=''

	    else:
	        boo=True
	        while boo:
	            try:
	                ret=ssh.send_command("sh ip route "+dst+" | include via")
	                boo=False
	            except:
	                boo=True
	                print("6 Exception Handled- Trying again")
	            print(" Return from sh ip route")
	            print(ret)
	            if not ret:
	                boo=True
	                print("6 Trying again")
	            
	            elif isinstance(ret,list):
	                print("6 Return from sh ip route is a list, trying again")
	                boo=True
	            elif len(ret.split())>3:
	                boo=False
	            else:
	                print("6 Trying again")
	                boo=True
	        print("output from sh ip route | inc via ")
	        print(ret)
	        print("Splitting")
	        ret=ret.split('\n')
	        for i in ret:
	            i=i.split()
	            print(i)
	            t=0
	            for j in i:
	                #print(j)
	                if re.match('^(?:[0-9]{1,3}\.){3}([0-9]{1,3})',j):
	                    print("extract- "+j[:-1])
	                    j=j[:-1]
	                    if j not in extract:
	                        extract.add(j)
	                        s.add(j)
	                        ls.append(j)     
	                    t=1
	               
	                if t==1:
	                    num=i.index('via')
	                    p=i[num+1]
	                    hop=i[num+1]
	                    p=p+' '+j
	                    if name not in exit.keys():
	                        exit[name]=set()
	              
	                    exit[name].add(p)

	                    ctobj=dictofnames[name]
	                    #print("ctobj "+ctobj)
	                    arr[ctobj].addexit(p)
	                    print("hop ",hop)
	                    boo=True
	                    ret1=""
	                    while boo:
	                        try:    
	                            ret1=ssh.send_command("sh ip int brief | include "+hop)
	                            boo=False
	                        except:
	                            print("6-2 Exception Handled- Trying again")
	                            boo=True
	                        print(" Return ")
	                        print(ret1)
	                        print(len(ret1.split()))
	                        #print(ret1.split()[0])
	                        if not ret1:
	                            boo=True
	                        elif isinstance(ret1,list):
	                            print("6-2 Return from sh ip int brief is a list, trying again")
	                            boo=True
	                        elif len(ret1.split())<5:
	                            boo=True
	                        else:
	                            boo=False
	                
	                    ip=ret1.split()[1]
	                    dictofobj[name].adddictip(hop,ip)

	                    p=''
	                    break
	    extract.clear()
	    s.remove(now)
	    ls.remove(now)
	 
	    if now!=src:
	        boo=True
	        while boo:
	            try:    
	                ret=ssh.send_command("sh ip int brief | include "+now)
	                boo=False
	            except:
	                print("7  Exception Handled- Trying again")
	                boo=True
	            print(" return from sh ip int brief | inc dest at dest ")
	            print(ret)
	            print(len(ret.split()))
	            if not ret:
	                print("7 null")
	                boo=True
	            elif isinstance(ret,list):
	                print("7 Return from sh ip route is a list, trying again")
	                boo=True
	            elif len(ret.split())<6:
	                boo=True
	                print("7 Trying Again")
	            else:
	                boo=False
	        print(" Name "+name+" sh ip int brief | include "+now)
	        print(ret)
	        ret=ret.split()
	 
	        if name not in entry.keys():
	            entry[name]=set()
	        p=''
	        p=ret[0]
	        hop=ret[0]
	        ip=now
	        p=p+' '+now
	        entry[name].add(p)

	        ctobj=dictofnames[name]
	        arr[ctobj].addentry(p)
	        dictofobj[name].adddictip(hop,ip)
	        

	        entryrev[now]=set()
	        p=''
	        p=name+' '+ret[0]
	        entryrev[now].add(p)
	     
	boo=True
	while boo:
	        try:
	            #device = {"device_type": "autodetect","host":dst,"username": "******","password":"******"}
	            #guesser = SSHDetect(**device)
	            #best_match = guesser.autodetect()
	            #print(best_match,guesser.potential_matches)
	            ssh=ConnectHandler(device_type="cisco_ios",host=dst,username="******",password="******")
	            boo=False
	        except Exception as e:
	            boo=True
	            print(" Connection error, trying again ",e,dst)



	boo=True
	while boo:
	    try:
	        name=ssh.find_prompt()
	        boo=False
	    except Exception as e:
	        print(str(e))
	        print("Find Prompt errTrying Again")
	        boo=True
	        
	name=name[:-1]
	honame.add(name)

	if name not in setofnames:
	    setofnames.add(name)
	    dictofnames[name]=count
	    k1=router(name)
	    arr.append(k1)
	    dictofobj[name]=k1
	    dictofobj[name].addconnect(ssh)
	    count+=1


	boo=True
	while boo:
	    try:
	        ret=ssh.send_command("sh ip int brief | include "+dst)
	        boo=False
	    except:
	        print("8 Exception Handled- Trying again")
	        boo=True
	    print(" return from sh ip int brief | inc dest ")
	    print(ret)
	    if not ret:
	        boo=True
	    elif isinstance(ret,list):
	        print("8 Return from sh ip int brief is a list, trying again")
	        boo=True
	    elif len(ret.split())<6:
	        boo=True
	        print("8 Trying Again")
	    else:
	        boo=False

	ret=ret.split()
	#print(ret)
	if name not in entry.keys():
	    entry[name]=set()
	p=''
	p=ret[0]
	p=p+' '+'directly'
	entry[name].add(p)

	hop=ret[0]
	ip=dst

	ctobj=dictofnames[name]
	arr[ctobj].addentry(p)
	dictofobj[name].adddictip(hop,ip)

	p=''
	entryrev['directly']=set()
	p=name+' '+ret[0]
	entryrev['directly'].add(p)

	print("Entry interfaces ")
	print(entry)
	print()
	print(" Exit  interfaces ")
	print(exit)

	print()
	print(" Entry Reverse ")
	print(entryrev)

	ff=0


	for nme in setofnames:
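	    # Second pass: reconnect to each discovered router and collect KPIs
	    # (version, CPU and memory usage, recent routes, BGP/EIGRP neighbours,
	    # filtered syslog, spanning-tree and interface error counters, and
	    # per-interface details) into gennodedict / dictint for the JSON output.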
	    ssh=dictofobj[nme].handle

	    #general_node_parameters

	    boo=True

	    while boo:
	        ff=0
	        try:
	            ret=ssh.send_command("sh version",use_textfsm=True)
	            print("Sh version Exec")
	            boo=False
	        except Exception as e:
	            print(" 9-0 Exception raised is show version, trying again ")
	            print(e)
	            boo=True
	            ff=1
	            try:
	                #device = {"device_type": "autodetect","host":dictofobj[nme].sship,"username": "******","password":"******"}
	                #guesser = SSHDetect(**device)
	                #best_match = guesser.autodetect()
	                #print(best_match,guesser.potential_matches)
	                ssh=ConnectHandler(device_type="cisco_ios",host=dictofobj[nme].sship,username="******",password="******")
	            except Exception as ee:
	                print("Exception raised again")
	                print(ee)

	        print("Return from Show version")
	        print(ret)
	        if ff==1:
	            boo=True
	        elif not ret:
	            print(" 9-0 Return from show version is null. Trying again ")
	            boo=True
	        elif not(isinstance(ret,list)):
	            print(" 9-0 Return from show version is not proper. Trying again ")
	            boo=True
	        else:
	            boo=False

	    verdict={}
	    verdict['soft_ver']=ret[0]['soft_ver']
	    verdict['version']=ret[0]['version']
	    verdict['uptime']=ret[0]['uptime']    
	    verdict['hardware']=ret[0]['hardware'][0]        
	    verdict['reload_reason']=ret[0]['reload_reason']
	    dictofobj[nme].gennodedict['version']=verdict 
	    







	    
	    boo=True
	    while boo:
	        try:
	            ret=ssh.send_command("sh proc cpu | ex 0.0",use_textfsm=True)
	            boo=False
	        except:
	            print("9 Exception Raised , Trying again")
	            boo=True
	        if not(isinstance(ret,list)):
	            boo=True
	            print("9 return from sh proc cpu not proper, trying again")
	        else:
	            boo=False
	            
	    #parse the return from show environment and take out parameters like
	    ct1=0
	    for line in ret:
	        if ct1==0:
	            cpu={}
	            cpu['cpu_5_sec']=line['cpu_5_sec']
	            cpu['cpu_1_min']=line['cpu_1_min']
	            cpu['cpu_5_min']=line['cpu_5_min']
	            dictofobj[nme].gennodedict['CPU']=cpu                
	            
	        combine={}
	        combine['process']=line['process']
	        combine['proc_5_sec']=line['proc_5_sec']
	        combine['proc_1_min']=line['proc_1_min']
	        combine['proc_5_min']=line['proc_5_min']
	        dictofobj[nme].gennodedict[line['pid']]=combine      
	        ct1+=1


	#parsing sh ip route

	    boo=True
	    while boo:
	        try:
	            ret=ssh.send_command("sh ip route")
	            boo=False
	        except:
	            print("10 Exception Raised , Trying again")
	            boo=True
	        print(ret)
	        if not ret:
	            boo=True
	        elif isinstance(ret,list):
	            print("10 Return from sh ip route is a list, trying again")
	            boo=True
	        else:
	            boo=False

	    ret=ret.split('\n')
	    gen={}
	    ct1=0
	    print("RETURN: " ,ret)
	    for line in ret:
	        print("LINE: ",line)
	        line2=line.split()
	        print("Splitted LINE: ",line2)
	        # keep EIGRP (D) or BGP (B) routes that list a next hop
	        if line2 and line2[0]!='S' and line2[0]!='C' and line2[0]!='S*' and 'via' in line2 and (line2[0]=='D' or line2[0]=='B'):
	            pos=line2.index('via')
	            if line2[pos+2][0:2]=='00':
	                ct1+=1
	                gen[ct1]=line
	                print(line)        
	    dictofobj[nme].gennodedict['ip_route_00']=gen
	                
	    #keys for gen dict is just numbers with no significance. display only values        
	    #dictofobj[nme].gennodedict['redundant_power']=



	    
	#-----------------------------------------Harshad------------------------------------------------------------------------------------------
	    boo=True
	    while boo:
	        ans=ans1=0
	        try:
	            ans=ssh.send_command("show ip protocols | include bgp")
	            ans1=ssh.send_command("show ip protocols | include eigrp")
	            boo=False
	        except:
	            print("9-2 Exception raised in sh ip protocols, trying again ")
	            boo=True
	        print(" sh ip protocols | i bgp ")
	        print(ans)
	        print(" sh ip protocols | i eigrp")
	        print(ans1)
	        if not(isinstance(ans,str)) or not(isinstance(ans1,str)):
	            boo=True
	            print("9-2 Return from sh ip protocols not proper. Trying again")
	        elif not ans and not ans1:
	            print("9-2  Return null from both protocols, trying again ")
	            boo=True
	        elif (not(ans1) and len(ans.split())<5) or (not(ans) and len(ans1.split())<5):
	            print(" 9-2-1 Return from sh ip protocols not proper. Trying again")
	            boo=True
	        elif not(not(ans)) and len(ans.split())<5 and not(not(ans1)) and len(ans1.split())<5:
	            print(" 9-2-3 Return from sh ip protocols not proper. Trying again")
	            boo=True
	        else:
	            boo=False
	            
	    
	    bgp=ans.split("\n")
	    eigrp=ans1.split("\n")
	    bgp_sub='"bgp'
	    eigrp_sub='"eigrp'
	    flag1=0
	    flag2=0
	    for text in bgp:
	        if bgp_sub in text:
	            flag1=1
	            break
	    for text in eigrp:
	        if eigrp_sub in text:
	            flag2=1
	            break
	    
	    if flag2==1:
	        print("eigrp there")
	      #call bgp func
	        boo=True
	        while boo:
	            try:
	                ans=ssh.send_command("show ip eigrp neighbors")
	                boo=False
	            except:
	                print("9-3 Exception handled. Error in sh ip eigrp neigh. Trying again ")
	                boo=True
	            
	            print(" Return from sh ip eigrp neighbors ")
	            print(ans)
	            if not ans:
	                print('Null returned from show ip eigrp neighbors')
	                boo=True
	            elif not(isinstance(ans,str)):
	                print('not a string, returned from show ip eigrp neighbors')
	                boo=True
	            elif len(ans.split())<3:
	                print('size less, returned from show ip eigrp neighbors')
	                boo=True
	            else:
	                boo=False
	        boo=True
	        while boo: 
	            try:
	                template=open('cisco_ios_show_ip_eigrp_neighbors.template')
	                res_template=textfsm.TextFSM(template)
	                ans_final=res_template.ParseText(ans)
	                boo=False
	            except Exception as e:
	                print(e)
	                print("9-4 Exception in Textfsm, Trying again")
	                boo=True
	        print(ans_final)
	        hello={}
	        j=0
	        dictofobj[nme].gennodedict['eigrp_neigh']=dict()
	        for i in range(0,len(ans_final)):
	            hello={}
	            hello['neighbor']=ans_final[i][1]
	            hello['uptime']=ans_final[i][4]
	            hello['srtt']=ans_final[i][5]
	            hello['rto']=ans_final[i][6]
	            #dictofobj[nme].gennodedict['eigrp_neigh']
	            dictofobj[nme].gennodedict['eigrp_neigh'][ans_final[i][1]]=dict()
	            dictofobj[nme].gennodedict['eigrp_neigh'][ans_final[i][1]]=hello
	  

	  
	    if flag1==1:
	        print("bgp there")
	        ans=ssh.send_command("show ip bgp summary")
	        print(ans)
	        template=open('cisco_ios_show_ip_bgp_summary.template')
	        res_template=textfsm.TextFSM(template)
	        ans_final=res_template.ParseText(ans)
	        print(ans_final)
	        hello={}
	        j=0
	        dictofobj[nme].gennodedict['bgp_neigh']=dict()
	        for i in range(0,len(ans_final)):
	            hello={}
	            hello['neighbor']=ans_final[i][2]
	            hello['AS']=ans_final[i][3]
	            hello['up/down']=ans_final[i][5]

	            dictofobj[nme].gennodedict['bgp_neigh'][ans_final[i][2]]=dict()
	            dictofobj[nme].gennodedict['bgp_neigh'][ans_final[i][2]]=hello
	            
	            
	        


	#----------------------------------------------------------------------------------------------------------------------------------------------


	#------------------------------------------Neeraj-----------------------------------------------------------------------------------------------



	    boo=True
	    while boo:

	        try:
	            ret = ssh.send_command("show proc mem | include Total")
	            boo=False
	        except:
	            print(" 9-4 Exception handled in sh proc mem | inc Pool Total. Trying Again")
	            boo=True
	        print("Return from show proc mem ")
	        print(ret)
	        if not ret:
	            print(" 9-4 Returned value is null. Trying again ")
	            boo=True
	        elif not(isinstance(ret,str)):
	            boo=True
	            print("9-4 Returned value is not string, trying again ")
	        elif ret.split()[0]!='Processor':
	            print("9-4 Returned value on show proc mem is not proper, trying again")
	            boo=True
	        elif len(ret.split())<6:
	            print("9-4 Returned value on show proc mem is not proper, trying again")
	            boo=True
	        else:
	            boo=False
	        

	    def mb(str):
	        return round(int(str)/1024/1024,2)
	        #return 1
	        

	    def percent(a,b):
	        return round((int(a)/int(b)) * 100,2)
	        
	    memory = dict()
	    ret = ret.split('\n')
	    count=0
	    for line in ret:
	        count=count+1
	        if (count>2):
	            break
	        temp_vals = line.split(' ')
	        vals = []
	        for string in temp_vals:
	            if len(string.strip())>0:
	                vals.append(string)
	        print(vals)
	        memory.update({vals[0]:{'total':mb(vals[3]),'used':mb(vals[5]),'free':mb(vals[7]),'percent':percent(vals[5],vals[3])}})   

	    dictofobj[nme].gennodedict['Process_Memory']=dict()
	    dictofobj[nme].gennodedict['Process_Memory']=memory
	    print(memory)




	    boo=True
	    while boo:
	        
	        try:
	            time1 = ssh.send_command("show clock")
	            boo=False
	        except:
	            print("9-4 Exception handled. sh clock. Trying again")
	            boo=True
	        print("Time ")
	        print(time1)
	        if not time1:
	            boo=True
	            print("9-4 Return from show clock not proper. Trying again ")
	        elif not(isinstance(time1,str)):
	            print("9-4  Return from show clock in not string. Trying again")
	            boo=True
	        elif len(time1.split())<5:
	            print("Time received not proper. Trying again ")
	            boo=True
	        else:
	            boo=False
	    
	    
	    time1=time1.split(" ")[3:5]
	    time1 = time1[0]+" "+time1[1]
	    print(time1)
	    #ret = src.send_command("show log | i down|Down|up|Up|err|fail|Fail|drop|crash|MALLOCFAIL|duplex",time[0]+" "+str((int(time[1])-1)))

	    boo=True
	    while boo:
	        try:
	            
	            ret = ssh.send_command("show log | i down|Down|err|fail|Fail|drop|crash|MALLOCFAIL|")
	            boo=False
	        except:
	            print("9-5 exception handled in show log. Trying again ")
	            boo=True
	        if not(isinstance(ret,str)):
	            print("9-5  Return from show log in not string. Trying again")
	            boo=True
	        else:
	            boo=False
	            
	    array = ret.split('\n')

	    count=0
	    syslog = dict()
	    for line in array:
	        if line.find('%')!=-1 and (line.find("NBRCHANGE")!=-1 or line.find("ADJCHANGE")!=-1 or line.find("UPDOWN")!=-1 or line.find("duplex")!=-1):
	            syslog.update({count:line})
	            count+=1
	    dictofobj[nme].gennodedict['log']=syslog


	#---------------------------------------------------------------------------------------------------------------------------------------
	#---------------------------------------------------ARAVIND-----------------------------------------------------------------------------


	    map_return = {}
	    print("For Spanning Tree KPI")
	    print(dictofobj[nme].gennodedict['version']['hardware'])
	    output_span=''
	    if dictofobj[nme].gennodedict['version']['hardware']=='3725':
	            
	        boo=True
	        while boo:
	            try:
	                output_span= ssh.send_command("sh spanning-tree active")
	                boo=False
	            except:
	                boo=True
	                print("9-6 Exception Raised in show spanning tree")
	            if not output_span:
	                print(" 9-6 Return from show spanning tree is null. Trying again ")
	                boo=True
	            elif not(isinstance(output_span,str)):
	                boo=True
	                print(" 9-6 Return from show spanning tree is not a string. Trying again ")
	                
	            elif len(output_span.split())<4:
	                boo=True
	                print(" 9-6 Return from show spanning tree is not proper. Trying again")
	            else:
	                boo=False
	        
	        l=output_span.split('\n')
	        print("Spanning LIST")
	        print(l)
	        flag = 0
	        p = 12
	        m1={}
	        for k in range(len(l)-6):
	            if (k == p):
	                print(" k ")
	                print(k)
	                print(" p ")
	                print(p)
	                print(l[k])
	                print(l[k + 6])
	                m1[l[k]] = l[k + 6]
	                p += 9
	        p = 12
	        time.sleep(20)
	        for lo in range(0, 2):
	            for k in range(len(l)-6):
	                if (k == p):
	                    print(" k ")
	                    print(k)
	                    print(" p ")
	                    print(p)
	                    # print(l[k])
	                    # print(l[k+6])
	                    if (m1[l[k]] != l[k + 6]):
	                        map_return[l[k]] = l[k + 6]
	                        print(l[k] + "\n" + l[k + 6])
	                        flag = 1
	                    else:
	                        print("No change Observed")
	                        flag = 0
	                    p += 9
	            p = 12
	            time.sleep(20)
	        if (flag == 0):
	            print("No Changes in the Past 1 minute")
	        flag = 0
	        print('\n\n\n')
	    dictofobj[nme].gennodedict['spanning_tree']=map_return








	    print("For show interface counters")
	    print(dictofobj[nme].gennodedict['version']['hardware'])
	    Int_count={}
	    if dictofobj[nme].gennodedict['version']['hardware']=='3725':
	        boo=True
	        while boo:
	            try:
	                command = ssh.send_command("sh int counters error | ex 0")
	                boo=False
	            except:
	                boo=True
	                print("9-7 Exception handled - sh int counters error, Trying again")
	            print("Return from show int counters error")
	            print(command)
	            if not command:
	                print("9-7 Return from sh int counters errors is null, trying again")
	                boo=True
	            elif not(isinstance(command,str)):
	                print("9-7 Return from sh int counters errors is not a string, trying again")
	                boo=True
	            elif len(command.split())<5:
	                boo=True
	                print("9-7 Return from sh int counters errors is not proper, trying again")
	            else:
	                boo=False
	                    
	        output = command.split('\n')
	        output.pop(0)
	        if (output[1]):
	            for i in output:
	                print(i + '\n')
	                Int_count = {i: 5 for i in output}
	        else:
	            print("Sorry Empty")
	        
	    dictofobj[nme].gennodedict['interface_counters_errors']=Int_count
















	#---------------------------------------------------------------------------------------------------------------------------------------------------



	    #interface_level_details
	    for interf in dictofobj[nme].dictint.keys():
	        print(interf+" in loop ")
	        boo=True
	        while boo:
	            try:
	                ret=ssh.send_command("sh interfaces "+interf,use_textfsm=True)
	                boo=False
	            except:
	                print("11 Exception Raised , Trying again")
	                boo=True
	                continue
	            print(ret)
	            if not ret:
	                boo=True
	            elif isinstance(ret,str):
	                print("11 output not in proper form, trying again ")
	                boo=True
	            else:
	                boo=False
	        #Parse the sh interface output and get the crc and other things out
	        print(ret)
	        line={}
	        for line in ret:
	            x=line.keys()
	            if 'crc' in x:
	                dictofobj[nme].dictint[interf]['crc']=line['crc']
	            if 'duplex' in x:
	                dictofobj[nme].dictint[interf]['duplex']=line['duplex']
	            if 'reliability' in x:
	                dictofobj[nme].dictint[interf]['reliability']=line['reliability']
	            if 'txload' in x:
	                dictofobj[nme].dictint[interf]['txload']=line['txload']
	            if 'rxload' in x:
	                dictofobj[nme].dictint[interf]['rxload']=line['rxload']
	            if 'speed' in x:
	                dictofobj[nme].dictint[interf]['speed']=line['speed']
	            if 'collisions' in x:
	                dictofobj[nme].dictint[interf]['collisions']=line['collisions']
	            if 'late_collision' in x:
	                dictofobj[nme].dictint[interf]['late_collision']=line['late_collision']
	            if 'overrun' in x:
	                dictofobj[nme].dictint[interf]['overrun']=line['overrun']
	            if 'interf_reset' in x:
	                dictofobj[nme].dictint[interf]['interf_reset']=line['interf_reset']
	            if 'input_errors' in x:
	                dictofobj[nme].dictint[interf]['input_errors']=line['input_errors']
	            if 'output_errors' in x:
	                dictofobj[nme].dictint[interf]['output_errors']=line['output_errors']
	            if 'frame' in x:
	                dictofobj[nme].dictint[interf]['frame']=line['frame']
	            if 'ignored' in x:
	                dictofobj[nme].dictint[interf]['ignored']=line['ignored']
	            if 'bandwidth' in x:
	                dictofobj[nme].dictint[interf]['bandwidth']=line['bandwidth']
	            if 'output_drops' in x:
	                dictofobj[nme].dictint[interf]['output_drops']=line['output_drops']
	        



	        """boo=True
	        while boo:
	            try:
	                ret=ssh.send_command("sh controllers "+interf)
	                boo=False
	            except:
	                print("Exception Raised 2, Trying again")
	                boo=True
	            if not ret:
	                boo=True
	            elif len(boo.split('\n'))<4:
	                boo=True
	            else:
	                boo=False
	        #parse the ret and get the required parameters


	        dictofobj[nme].dictint[interf]['']=
	        dictofobj[nme].dictint[interf]['']=
	        dictofobj[nme].dictint[interf]['']=
	        dictofobj[nme].dictint[interf]['']=
	        dictofobj[nme].dictint[interf]['']="""
	        
	    forjson={}
	    forjson['Name']=dict()
	    #forjson['Name'][0]=dict()
	    forjson['Name']['0']=nme
	    forjson['Interface Dictionary']=dict()

	    forjson['Interface Dictionary']=dictofobj[nme].dictint
	    forjson['General Node']=dict()
	    forjson['General Node']=dictofobj[nme].gennodedict
	    intojson.append(forjson)
	    


	    ssh.disconnect()
	        

	#for r in arr:
	#    r.objprint()



	print( "FINAL OUTPUT ")
	print(exit)
	print(entryrev)
	print(intojson)
	return exit,entryrev,intojson
def unpack_job(which_machine, which_untrusted, next_directory, next_to_grade):

    # variables needed for logging
    obj = packer_unpacker.load_queue_file_obj(JOB_ID, next_directory,
                                              next_to_grade)
    partial_path = os.path.join(obj["gradeable"], obj["who"],
                                str(obj["version"]))
    item_name = os.path.join(obj["semester"], obj["course"], "submissions",
                             partial_path)
    is_batch = "regrade" in obj and obj["regrade"]

    # verify the DAEMON_USER is running this script
    if not int(os.getuid()) == int(DAEMON_UID):
        grade_items_logging.log_message(
            JOB_ID, message="ERROR: must be run by DAEMON_USER")
        raise SystemExit(
            "ERROR: the grade_item.py script must be run by the DAEMON_USER")

    if which_machine == 'localhost':
        address = which_machine
    else:
        address = which_machine.split('@')[1]

    fully_qualified_domain_name = socket.getfqdn()
    servername_workername = "{0}_{1}".format(fully_qualified_domain_name,
                                             address)
    target_results_zip = os.path.join(
        SUBMITTY_DATA_DIR, "autograding_DONE",
        servername_workername + "_" + which_untrusted + "_results.zip")
    target_done_queue_file = os.path.join(
        SUBMITTY_DATA_DIR, "autograding_DONE",
        servername_workername + "_" + which_untrusted + "_queue.json")

    if which_machine == "localhost":
        if not os.path.exists(target_done_queue_file):
            return False
        else:
            local_done_queue_file = target_done_queue_file
            local_results_zip = target_results_zip
    else:
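        # Remote worker: pull the results zip and done-queue file into local
        # temporary files over SFTP, then delete them from the worker.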
        user, host = which_machine.split("@")
        ssh = paramiko.SSHClient()
        ssh.get_host_keys()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        try:
            ssh.connect(hostname=host, username=user, timeout=5)

            sftp = ssh.open_sftp()
            fd1, local_done_queue_file = tempfile.mkstemp()
            fd2, local_results_zip = tempfile.mkstemp()
            #remote path first, then local.
            sftp.get(target_done_queue_file, local_done_queue_file)
            sftp.get(target_results_zip, local_results_zip)
            #Because get works like cp rather than mv, we have to clean up.
            sftp.remove(target_done_queue_file)
            sftp.remove(target_results_zip)
            success = True
        #This is the normal case (still grading on the other end) so we don't need to print anything.
        except FileNotFoundError:
            os.remove(local_results_zip)
            os.remove(local_done_queue_file)
            success = False
        #In this more general case, we do want to print what the error was.
        #TODO catch other types of exception as we identify them.
        except Exception as e:
            grade_items_logging.log_stack_trace(job_id=JOB_ID,
                                                trace=traceback.format_exc())
            grade_items_logging.log_message(
                JOB_ID,
                message=
                "ERROR: Could not retrieve the file from the foreign machine "
                + str(e))
            print(
                "ERROR: Could not retrieve the file from the foreign machine.\nERROR: {0}"
                .format(e))
            os.remove(local_results_zip)
            os.remove(local_done_queue_file)
            success = False
        finally:
            os.close(fd1)
            os.close(fd2)
            sftp.close()
            ssh.close()
            if not success:
                return False
    # archive the results of grading
    try:
        success = packer_unpacker.unpack_grading_results_zip(
            which_machine, which_untrusted, local_results_zip)
    except:
        grade_items_logging.log_stack_trace(job_id=JOB_ID,
                                            trace=traceback.format_exc())
        grade_items_logging.log_message(
            JOB_ID,
            jobname=item_name,
            message=
            "ERROR: Exception when unpacking zip. For more details, see traces entry."
        )
        with contextlib.suppress(FileNotFoundError):
            os.remove(local_results_zip)
        success = False

    with contextlib.suppress(FileNotFoundError):
        os.remove(local_done_queue_file)

    msg = "Unpacked job from " + which_machine if success else "ERROR: failure returned from worker machine"
    print(msg)
    grade_items_logging.log_message(JOB_ID,
                                    jobname=item_name,
                                    which_untrusted=which_untrusted,
                                    is_batch=is_batch,
                                    message=msg)
    return True
Exemple #21
0
#!/usr/bin/env python
# coding: UTF-8

import paramiko

client_actt = paramiko.SSHClient()
#client_actt.set_missing_host_key_policy(paramiko.WarningPolicy())
client_actt.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client_actt.connect('192.168.8.77', username='******', password='******')

stdin, stdout, stderr = client_actt.exec_command('source ~/opencampas/oc.sh')

for o in stderr:
    print(o)

client_actt.close()
	for item in check_list:
		hostport = item.split(":")
		host = hostport[0]
		port = int(hostport[1])
		# check the host information
		key = ""
		user = ""
		passwd = ""

		day=datetime.now()
		nowday=day.weekday()
		#if str(nowday) in check_day.split(",") and not is_alias_host_eq :
		if str(nowday) :
			tc = NewClient()
			tc.load_system_host_keys()
			tc._policy = paramiko.AutoAddPolicy()
			isDebug = func.get_config('global_value','isDebug')
			try:
				if not os.path.exists(key):
					key = func.key_path
				if not user :
					user = func.key_user

				tc.connect(host,port=port, username=user, password=passwd,key_filename=key, timeout=10)

				stdin, stdout, stderr, status = tc.call("ps aux|grep redis-server|grep -v 'grep' |awk '{print $2\"###\"$11\"###\"$12}'")
				data = stdout.read()
				res = data.split("\n")
				for line in res :
					if line :
						redis_info_dict = {}
Exemple #23
0
def async_ssh(cmd_dict):
    import paramiko
    from paramiko.buffered_pipe import PipeTimeout
    from paramiko.ssh_exception import (SSHException, PasswordRequiredException)
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    retries = 0
    while True:  # Be robust to transient SSH failures.
        try:
            # Set paramiko logging to WARN or higher to squelch INFO messages.
            logging.getLogger('paramiko').setLevel(logging.WARN)

            ssh.connect(hostname=cmd_dict['address'],
                        username=cmd_dict['ssh_username'],
                        port=cmd_dict['ssh_port'],
                        key_filename=cmd_dict['ssh_private_key'],
                        compress=True,
                        timeout=20,
                        banner_timeout=20)  # Helps prevent timeouts when many concurrent ssh connections are opened.
            # Connection successful, break out of while loop
            break

        except (SSHException,
                PasswordRequiredException) as e:

            print('[ dask-ssh ] : ' + bcolors.FAIL +
                  'SSH connection error when connecting to {addr}:{port} '
                  'to run \'{cmd}\''.format(addr=cmd_dict['address'],
                                            port=cmd_dict['ssh_port'],
                                            cmd=cmd_dict['cmd']) + bcolors.ENDC)

            print( bcolors.FAIL + '               SSH reported this exception: ' + str(e) + bcolors.ENDC )

            # Print an exception traceback
            traceback.print_exc()

            # Transient SSH errors can occur when many SSH connections are
            # simultaneously opened to the same server. This makes a few
            # attempts to retry.
            retries += 1
            if retries >= 3:
                print( '[ dask-ssh ] : '
                      + bcolors.FAIL
                      + 'SSH connection failed after 3 retries. Exiting.'
                      + bcolors.ENDC)

                # Connection failed after multiple attempts.  Terminate this thread.
                os._exit(1)

            # Wait a moment before retrying
            print('               ' + bcolors.FAIL +
                  'Retrying... (attempt {n}/{total})'.format(n=retries, total=3) +
                  bcolors.ENDC)

            time.sleep(1)

    # Execute the command, and grab file handles for stdout and stderr. Note
    # that we run the command using the user's default shell, but force it to
    # run in an interactive login shell, which hopefully ensures that all of the
    # user's normal environment variables (via the dot files) have been loaded
    # before the command is run. This should help to ensure that important
    # aspects of the environment like PATH and PYTHONPATH are configured.

    print('[ {label} ] : {cmd}'.format(label=cmd_dict['label'],
                                       cmd=cmd_dict['cmd']))
    stdin, stdout, stderr = ssh.exec_command('$SHELL -i -c \'' + cmd_dict['cmd'] + '\'', get_pty=True)

    # Set up channel timeout (which we rely on below to make readline() non-blocking)
    channel = stdout.channel
    channel.settimeout(0.1)

    def read_from_stdout():
        """
        Read stdout stream, time out if necessary.
        """
        try:
            line = stdout.readline()
            while len(line) > 0:    # Loops until a timeout exception occurs
                line = line.rstrip()
                logger.debug('stdout from ssh channel: %s', line)
                cmd_dict['output_queue'].put('[ {label} ] : {output}'.format(label=cmd_dict['label'],
                                                                             output=line))
                line = stdout.readline()
        except (PipeTimeout, socket.timeout):
            pass

    def read_from_stderr():
        """
        Read stderr stream, time out if necessary.
        """
        try:
            line = stderr.readline()
            while len(line) > 0:
                line = line.rstrip()
                logger.debug('stderr from ssh channel: %s', line)
                cmd_dict['output_queue'].put('[ {label} ] : '.format(label=cmd_dict['label']) +
                                             bcolors.FAIL + '{output}'.format(output=line) + bcolors.ENDC)
                line = stderr.readline()
        except (PipeTimeout, socket.timeout):
            pass

    def communicate():
        """
        Communicate a little bit, without blocking too long.
        Return True if the command ended.
        """
        read_from_stdout()
        read_from_stderr()

        # Check to see if the process has exited. If it has, we let this thread
        # terminate.
        if channel.exit_status_ready():
            exit_status = channel.recv_exit_status()
            cmd_dict['output_queue'].put('[ {label} ] : '.format(label=cmd_dict['label']) +
                                         bcolors.FAIL +
                                         "remote process exited with exit status " +
                                         str(exit_status) + bcolors.ENDC)
            return True

    # Wait for a message on the input_queue. Any message received signals this
    # thread to shut itself down.
    while cmd_dict['input_queue'].empty():
        # Kill some time so that this thread does not hog the CPU.
        time.sleep(1.0)
        if communicate():
            break

    # Ctrl-C the executing command and wait a bit for command to end cleanly
    start = time.time()
    while time.time() < start + 5.0:
        channel.send(b'\x03')  # Ctrl-C
        if communicate():
            break
        time.sleep(1.0)

    # Shutdown the channel, and close the SSH connection
    channel.close()
    ssh.close()
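
The helper above is driven entirely through the two queues in cmd_dict. A rough usage sketch from the caller's side, with placeholder hosts and assuming the helper is named async_ssh (a hypothetical name here, since the def line appears earlier in the file):

import threading
try:
    from queue import Queue        # Python 3
except ImportError:
    from Queue import Queue        # Python 2

cmd_dict = {
    'address': 'worker-1.example.com',        # placeholder host
    'ssh_username': 'ubuntu',
    'ssh_port': 22,
    'ssh_private_key': '/home/ubuntu/.ssh/id_rsa',
    'cmd': 'dask-worker scheduler.example.com:8786',
    'label': 'worker 1',
    'input_queue': Queue(),    # put anything here to tell the thread to shut down
    'output_queue': Queue(),   # labelled stdout/stderr lines appear here
}

thread = threading.Thread(target=async_ssh, args=(cmd_dict,))  # async_ssh is assumed
thread.start()

# ...drain output for a while, then ask the remote command to stop:
while not cmd_dict['output_queue'].empty():
    print(cmd_dict['output_queue'].get())
cmd_dict['input_queue'].put('shutdown')
thread.join()
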
def launch_instances(arg1, arg2, arg3):
    """
        Function Name        : launch_instances
        Function Description : Launches an Instance in OpenStack Environment.
        
        Inputs   : 
            arg1                    - Image (imageName) which is mandatory to launch an Instance.
            arg2                    - Flavour to be used to launch an Instance.
            arg3                    - Name of the Instance to be created.
        Outputs  : 
            an Instance is being created with desired Image ,Flavour and Instance Name.
    """

    host = '10.53.173.114'  # server IP address
    username = '******'  # server user name
    password = '******'  # server password
    ssh_obj = paramiko.SSHClient()
    ssh_obj.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh_obj.connect(host, username=username, password=password,
                    timeout=10)  #SSH to the server.
    print "Connected successfully to the Host :", host
    time.sleep(2)
    print "Launching Instance ", arg3, " with Image ", arg1, " and Flavour ", arg2
    # Creating Instances.
    stdin, stdout, stderr = ssh_obj.exec_command(
        "source keystonerc_admin && nova boot --image " + arg1 + " --flavor " +
        arg2 +
        " --nic port-id=d65e484f-515b-4664-baf4-955e8a98017f --nic port-id=9edf3d21-62f5-4cca-8211-733ec1c8794c "
        + arg3)
    time.sleep(5)
    # Read both streams; checking stderr (the stdout handle itself is always
    # truthy) tells us whether the nova boot command actually failed.
    errout = stderr.read()
    data = stdout.read()
    if not errout:
        print "Instance ", arg3, " Launched Successfully"
    else:
        print 'Failed to Launch Instance ', arg3, ' with Image ', arg1, ' and Flavour ', arg2
        print errout
    '''
    time.sleep(10)
    print "List of Instances:"
    stdin, stdout1, stderr1 = ssh_obj.exec_command("source keystonerc_admin && nova list")   
    time.sleep(5)
    errout = stderr1.read()
    if stdout1:
        print stdout1.read()
        
        #return errout
    else:
        # return stdout.read()
        print errout 
    attaching_ip_address(1,arg3)
    '''

    stdin, stdout, stderr = ssh_obj.exec_command(
        "source keystonerc_admin && nova list")
    time.sleep(5)
    errout = stderr.read()
    data = stdout.read()
    if not errout:
        print "Instances List"
        print data
    else:
        print 'Failed to display Instances list'
        print errout

    if arg3 in data:
        items = re.findall((arg3 + ".*$"), data, re.MULTILINE)
        for ip in items:
            #print ip
            ip = ip.split('|')
            #print ip[0]
            #print ip[-2]
            ext_mgmtIP = ip[-2]
            ext_mgmtIP = ext_mgmtIP.split('=')
            ext_mgmtIP = (ext_mgmtIP[-1]).split(" ")
            ext_mgmtIP = str(ext_mgmtIP[0])
            #Copying the Ip address to a file in the server.
            stdin, stdout, stderr = ssh_obj.exec_command(
                "source keystonerc_admin && echo " + "'" + arg3 + " " +
                ext_mgmtIP + "'" + " >>robot_input_openstack.txt")
            print "IP address ", ext_mgmtIP, "is copied to robot_input_openstack.txt "

    ssh_obj.close()
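
The IP extraction above slices each matching row of the pipe-separated nova list table. The same slicing on a made-up row, purely to show which piece ends up in robot_input_openstack.txt (the column layout is an assumption for illustration):

# Hypothetical row matched by re.findall(arg3 + ".*$", data, re.MULTILINE):
row = "vnf-test | ACTIVE | -      | Running | ext_mgmt=10.10.1.23 |"

cols = row.split('|')                                 # pipe-separated columns
networks = cols[-2]                                   # ' ext_mgmt=10.10.1.23 '
ext_mgmt_ip = networks.split('=')[-1].split(" ")[0]   # '10.10.1.23'
print(ext_mgmt_ip)
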
Exemple #25
0
    def connect_session(self, portNo, ftpPassword, consolePassword, databasePassword, devicePassword, OsFile, configFile, willClone, updateGui):
        try:
            # Get values from user input
            ftpAddress = getFtpAddress()
            osFile = getOsPath() + OsFile
            confPath = getIniConfPath() + configFile
            username = getConsoleName() + ":" + str(portNo)
            hostname = getConsoleAddress()

            # Connect to the console server
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(hostname, 22, username, consolePassword)
            term = ssh.invoke_shell()

            # Log into the device
            if updateGui:
                self.trigger.emit(11)
            waitForTerm(term, 60, "login:"******"set cli screen-length 0")

            # Get device serial number
            originalVersion = send_command(term, "show system software")
            xml = send_command(term, "show chassis hardware | display xml")
            serialNo = parse_xml_serial(xml)

            # Push serial number to the database
            updatedTime = pushSerial(getDatabaseAddress(), getDatabaseUsername(), databasePassword, getDatabase(), configFile, serialNo, confPath)
            if updateGui:
                self.trigger.emit(44)

            # Upgrade JUNOS
            ftpDetails = getFtpUsername() + ":" + ftpPassword + "@" + ftpAddress
            upgradeOs = "request system software add ftp://" + ftpDetails + osFile + " no-copy no-validate reboot"
            send_command(term, upgradeOs)

            # Wait for the device to reboot
            if updateGui:
                self.trigger.emit(55)
            print("upgrading")
            waitForTerm(term, 180, "login:"******"finished")
            waitForLogin(term,devicePassword)
            if updateGui:
                self.trigger.emit(66)
            waitForTerm(term, 2, "root")
            send_command(term, "set cli screen-length 0")

            # Snapshot to the backup partition
            if willClone:
                send_command(term, "request system snapshot media internal slice alternate")
            time.sleep(15)
            waitForTerm(term, 60, "root")

            # Check the version of JUNOS
            updatedVersion = send_command(term, "show system software")
            if updateGui:
                self.trigger.emit(77)

            # Start applying a configuration to the device
            if updatedVersion != originalVersion:
                send_command(term, "configure")
                time.sleep(2)
                send_command(term, "delete")
                time.sleep(2)
                send_command(term, "yes")
                time.sleep(2)
                # Get the configuration file from the FTP Server
                send_command(term, "load set ftp://" + ftpAddress + confPath)
                time.sleep(2)
                xml = send_command(term, "show snmp location | display xml")
                time.sleep(2)

                # Get device deployment location (rollNo)
                rollNo = ""
                try:
                    xml = xml.split("<rpc-reply")[1]
                    xml = "<rpc-reply" + xml
                    xml = xml.split("</rpc-reply>")[0]
                    xml += "</rpc-reply>"
                    xmlDict = xmltodict.parse(xml)
                    rollNo = xmlDict['rpc-reply']['configuration']['snmp']['location']
                except Exception:
                    print("No location data.")
                time.sleep(5)

                # Push roll number (deployment location) to the database
                pushRollNo(getDatabaseAddress(), getDatabaseUsername(), databasePassword, getDatabase(), rollNo, updatedTime)
                time.sleep(5)

                # Set device root password
                send_command(term, "set system root-authentication plain-text-password")
                send_command(term, devicePassword)
                send_command(term, devicePassword)  # confirm password
                time.sleep(2)

                #Commit the current configuration
                send_command(term, "commit and-quit")
                waitForTerm(term, 60, "root@")
                send_command(term, "request system autorecovery state save")
                time.sleep(30)
                send_command(term, "request system configuration rescue save")
                time.sleep(30)

                # Update the progress bar
                if updateGui:
                    self.trigger.emit(88)

                # Reboot the device
                send_command(term, "request system reboot")
                time.sleep(2)
                send_command(term, "yes")

                #Wait for the device to boot
                waitForTerm(term, 180, "login:"******"root")
                xml = send_command(term, "show configuration snmp location | display xml")
                time.sleep(5)

                # Get device deployment location (rollNo) - in order to perform a final check
                checkRollNo=""
                try:
                    xml = xml.split("<rpc-reply")[1]
                    xml = "<rpc-reply" + xml
                    xml = xml.split("</rpc-reply>")[0]
                    xml += "</rpc-reply>"
                    xmlDict = xmltodict.parse(xml)
                    checkRollNo = xmlDict['rpc-reply']['configuration']['snmp']['location']
                except:
                    print("No location data.")

                if rollNo == checkRollNo:
                    print("Deployment successful.")
                    send_command(term, "request system halt in 0")
                    time.sleep(2)
                    send_command(term, "yes")

                if updateGui:
                    self.trigger.emit(100)
            else:
                print("OS wasn't updated correctly, Not applying config, Shutting down")

        except paramiko.ssh_exception.BadHostKeyException:
            secondaryWindows.messageWindow("Host Key Error!", "Server’s host key could not be verified", True)
        except paramiko.ssh_exception.AuthenticationException:
            secondaryWindows.messageWindow("Authentication Error!", "Authentication failed, Check your details and try again", True)
        except paramiko.ssh_exception.SSHException:
            secondaryWindows.messageWindow("Unknown Error!", "Unknown error connecting or establishing an SSH session", True)
        except socket.gaierror as e:
            print(str(e))
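
The block that recovers the SNMP location from "| display xml" output appears twice in the method above. A small helper could factor it out; parse_snmp_location is a hypothetical name, not part of the original class:

import xmltodict

def parse_snmp_location(raw_output):
    """Extract configuration/snmp/location from CLI output containing an <rpc-reply> document."""
    try:
        xml = "<rpc-reply" + raw_output.split("<rpc-reply")[1]
        xml = xml.split("</rpc-reply>")[0] + "</rpc-reply>"
        return xmltodict.parse(xml)['rpc-reply']['configuration']['snmp']['location']
    except Exception:
        # Mirrors the broad handling above: any parse failure means no location data.
        print("No location data.")
        return ""

# e.g. rollNo = parse_snmp_location(send_command(term, "show snmp location | display xml"))
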
Exemple #26
0
 def __init__(self):
     super(LServer, self).__init__()
     self.client = paramiko.SSHClient()
     self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
     self.chan = None
     self.error = 'No error found while connecting'
Exemple #27
0
def main():
    module = AnsibleModule(argument_spec=dict(
        outputfile=dict(required=True),
        host=dict(required=True),
        username=dict(required=True),
        password=dict(required=True, no_log=True),
        enablePassword=dict(required=False, no_log=True),
        deviceType=dict(required=True),
        interfaceRange=dict(required=False),
        interfaceArg1=dict(required=True),
        interfaceArg2=dict(required=False),
        interfaceArg3=dict(required=False),
        interfaceArg4=dict(required=False),
        interfaceArg5=dict(required=False),
        interfaceArg6=dict(required=False),
        interfaceArg7=dict(required=False),
    ),
                           supports_check_mode=False)

    username = module.params['username']
    password = module.params['password']
    enablePassword = module.params['enablePassword']
    interfaceRange = module.params['interfaceRange']
    interfaceArg1 = module.params['interfaceArg1']
    interfaceArg2 = module.params['interfaceArg2']
    interfaceArg3 = module.params['interfaceArg3']
    interfaceArg4 = module.params['interfaceArg4']
    interfaceArg5 = module.params['interfaceArg5']
    interfaceArg6 = module.params['interfaceArg6']
    interfaceArg7 = module.params['interfaceArg7']
    outputfile = module.params['outputfile']
    hostIP = module.params['host']
    deviceType = module.params['deviceType']
    output = ""
    if not HAS_PARAMIKO:
        module.fail_json(msg='paramiko is required for this module')

    # Create instance of SSHClient object
    remote_conn_pre = paramiko.SSHClient()

    # Automatically add untrusted hosts (make sure okay for security policy in your environment)
    remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    # initiate SSH connection with the switch
    remote_conn_pre.connect(hostIP, username=username, password=password)
    time.sleep(2)

    # Use invoke_shell to establish an 'interactive session'
    remote_conn = remote_conn_pre.invoke_shell()
    time.sleep(2)

    # Enable and enter configure terminal then send command
    output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)

    output = output + cnos.enterEnableModeForDevice(enablePassword, 3,
                                                    remote_conn)

    # Make terminal length = 0
    output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2,
                                                 remote_conn)

    # Go to config mode
    output = output + cnos.waitForDeviceResponse("configure d\n", "(config)#",
                                                 2, remote_conn)

    # Send the CLI command
    if (interfaceArg1 == "port-aggregation"):
        output = output + cnos.portChannelConfig(
            remote_conn, deviceType, "(config)#", 2, interfaceArg1,
            interfaceArg2, interfaceArg3, interfaceArg4, interfaceArg5,
            interfaceArg6, interfaceArg7)
    else:
        output = output + cnos.interfaceConfig(
            remote_conn, deviceType, "(config)#", 2, "port-aggregation",
            interfaceRange, interfaceArg1, interfaceArg2, interfaceArg3,
            interfaceArg4, interfaceArg5, interfaceArg6, interfaceArg7)

    # Save the collected output into the file
    with open(outputfile, "a") as output_file:
        output_file.write(output)

    # Logic to check when changes occur or not
    errorMsg = cnos.checkOutputForError(output)
    if errorMsg is None:
        module.exit_json(changed=True,
                         msg="Port Aggregation configuration is done")
    else:
        module.fail_json(msg=errorMsg)
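
The cnos helpers used above (waitForDeviceResponse, enterEnableModeForDevice, and friends) all follow the same pattern: write a line to the interactive shell returned by invoke_shell() and read until an expected prompt shows up. A minimal sketch of that pattern in plain paramiko, for illustration only (wait_for_prompt is a hypothetical name, not the library's implementation):

import time

def wait_for_prompt(channel, command, prompt, timeout):
    """Send `command` on an interactive shell channel and read until `prompt` appears or `timeout` expires."""
    channel.send(command)
    collected = ""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if channel.recv_ready():
            collected += channel.recv(65535).decode('utf-8', 'replace')
            if prompt in collected:
                break
        else:
            time.sleep(0.1)
    return collected

# e.g. output += wait_for_prompt(remote_conn, "terminal length 0\n", "#", 2)
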
Exemple #28
0
def connect(user, host, port, sock=None):
    """
    Create and return a new SSHClient instance connected to given host.

    If ``sock`` is given, it's passed into ``SSHClient.connect()`` directly.
    Used for gateway connections by e.g. ``HostConnectionCache``.
    """
    from state import env, output

    #
    # Initialization
    #

    # Init client
    client = ssh.SSHClient()

    # Load system hosts file (e.g. /etc/ssh/ssh_known_hosts)
    known_hosts = env.get('system_known_hosts')
    if known_hosts:
        client.load_system_host_keys(known_hosts)

    # Load known host keys (e.g. ~/.ssh/known_hosts) unless user says not to.
    if not env.disable_known_hosts:
        client.load_system_host_keys()
    # Unless user specified not to, accept/add new, unknown host keys
    if not env.reject_unknown_hosts:
        client.set_missing_host_key_policy(ssh.AutoAddPolicy())

    #
    # Connection attempt loop
    #

    # Initialize loop variables
    connected = False
    password = get_password()
    tries = 0

    # Loop until successful connect (keep prompting for new password)
    while not connected:
        # Attempt connection
        try:
            tries += 1
            client.connect(
                hostname=host,
                port=int(port),
                username=user,
                password=password,
                key_filename=key_filenames(),
                timeout=env.timeout,
                allow_agent=not env.no_agent,
                look_for_keys=not env.no_keys,
                sock=sock
            )
            connected = True

            # set a keepalive if desired
            if env.keepalive:
                client.get_transport().set_keepalive(env.keepalive)

            return client
        # BadHostKeyException corresponds to key mismatch, i.e. what on the
        # command line results in the big banner error about man-in-the-middle
        # attacks.
        except ssh.BadHostKeyException, e:
            raise NetworkError("Host key for %s did not match pre-existing key! Server's key was changed recently, or possible man-in-the-middle attack." % host, e)
        # Prompt for new password to try on auth failure
        except (
            ssh.AuthenticationException,
            ssh.PasswordRequiredException,
            ssh.SSHException
        ), e:
            msg = str(e)
            # For whatever reason, empty password + no ssh key or agent
            # results in an SSHException instead of an
            # AuthenticationException. Since it's difficult to do
            # otherwise, we must assume empty password + SSHException ==
            # auth exception. Conversely: if we get SSHException and there
            # *was* a password -- it is probably something non auth
            # related, and should be sent upwards.
            #
            # This also holds true for rejected/unknown host keys: we have to
            # guess based on other heuristics.
            if e.__class__ is ssh.SSHException \
                and (password or msg.startswith('Unknown server')):
                raise NetworkError(msg, e)

            # Otherwise, assume an auth exception, and prompt for new/better
            # password.

            # Paramiko doesn't handle prompting for locked private
            # keys (i.e.  keys with a passphrase and not loaded into an agent)
            # so we have to detect this and tweak our prompt slightly.
            # (Otherwise, however, the logic flow is the same, because
            # ssh's connect() method overrides the password argument to be
            # either the login password OR the private key passphrase. Meh.)
            #
            # NOTE: This will come up if you normally use a
            # passphrase-protected private key with ssh-agent, and enter an
            # incorrect remote username, because ssh.connect:
            # * Tries the agent first, which will fail as you gave the wrong
            # username, so obviously any loaded keys aren't gonna work for a
            # nonexistent remote account;
            # * Then tries the on-disk key file, which is passphrased;
            # * Realizes there's no password to try unlocking that key with,
            # because you didn't enter a password, because you're using
            # ssh-agent;
            # * In this condition (trying a key file, password is None)
            # ssh raises PasswordRequiredException.
            text = None
            if e.__class__ is ssh.PasswordRequiredException:
                # NOTE: we can't easily say WHICH key's passphrase is needed,
                # because ssh doesn't provide us with that info, and
                # env.key_filename may be a list of keys, so we can't know
                # which one raised the exception. Best not to try.
                prompt = "[%s] Passphrase for private key"
                text = prompt % env.host_string
            password = prompt_for_password(text)
            # Update env.password, env.passwords if empty
            set_password(password)
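
The sock parameter mentioned in the docstring is how gateway ("jump host") connections work: a client already connected to the gateway opens a direct-tcpip channel to the real target, and that channel is handed to connect() as the socket. A rough sketch with placeholder host names (this is not Fabric's HostConnectionCache code):

import paramiko

# Connect to the gateway (bastion) host first.
gateway = paramiko.SSHClient()
gateway.set_missing_host_key_policy(paramiko.AutoAddPolicy())
gateway.connect('bastion.example.com', username='deploy')

# Open a direct-tcpip channel from the gateway to the internal target.
channel = gateway.get_transport().open_channel(
    'direct-tcpip', dest_addr=('internal-host.example.com', 22), src_addr=('', 0))

# The channel behaves like a socket, so it can be passed as `sock`.
target = paramiko.SSHClient()
target.set_missing_host_key_policy(paramiko.AutoAddPolicy())
target.connect('internal-host.example.com', username='deploy', sock=channel)
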
Exemple #29
0
def open_ssh_conn(ip):
    global check_sql
    #Change exception message
    try:

        #Define SSH parameters
        selected_user_file = open(user_file, 'r')

        #Starting from the beginning of the file
        selected_user_file.seek(0)

        #Reading the username from the file
        username = selected_user_file.readlines()[0].split(',')[0]

        #Starting from the beginning of the file
        selected_user_file.seek(0)

        #Reading the password from the file
        password = selected_user_file.readlines()[0].split(',')[1].rstrip("\n")

        #Logging into device
        session = paramiko.SSHClient()

        # For testing purposes, this auto-accepts unknown host keys.
        # Do not use in production! The default would be RejectPolicy.
        session.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        #Connect to the device using username and password
        session.connect(ip, username=username, password=password)

        #Start an interactive shell session on the router
        connection = session.invoke_shell()

        #Setting terminal length for entire output - disable pagination
        connection.send("terminal length 0\n")
        time.sleep(1)

        #Entering global config mode
        #connection.send("\n")
        #connection.send("configure terminal\n")
        #time.sleep(1)

        #Reading commands from within the script
        #Using the "\" line continuation character for better readability of the commands to be sent
        selected_cisco_commands = '''show version | include (, Version|uptime is|bytes of memory|Hz)&\
            show inventory&\
            show interfaces | include bia&\
            show processes cpu | include CPU utilization&\
            show memory statistics&\
            show ip int brief | include (Ethernet|Serial)&\
            show cdp neighbors detail | include Device ID&\
            show ip protocols | include Routing Protocol'''

        #Splitting commands by the "&" character
        command_list = selected_cisco_commands.split("&")

        #Writing each line in the command string to the device
        for each_line in command_list:
            connection.send(each_line + '\n')
            time.sleep(3)

        #Closing the user file
        selected_user_file.close()

        #Checking command output for IOS syntax errors
        output = connection.recv(65535)

        if re.search(r"% Invalid input detected at", output):
            print Fore.RED + "* There was at least one IOS syntax error on device %s" % ip
        else:
            print Fore.GREEN + "* All parameters were extracted from device %s" % ip,

            #Test for reading command output
            # print output + "\n"

        ############# Application #4 - Part #3 #############

        # Extracting device parameters
        # ...starting with the ones destined to the NetworkDevices table in MySQL
        dev_hostname = re.search(r"(.+) uptime is", output)
        hostname = dev_hostname.group(1)
        #print hostname

        dev_mac = re.findall(r"\(bia (.+?)\)", output)
        #print dev_mac

        mac = dev_mac[0]
        #print mac
        dev_vendor = re.search(r"(.+?) (.+) bytes of memory", output)
        vendor = dev_vendor.group(1)
        #print vendor

        dev_model = re.search(r"(.+?) (.+?) (.+) bytes of memory", output)
        model = dev_model.group(2)
        #print model

        dev_image_name = re.search(r" \((.+)\), Version", output)
        image_name = dev_image_name.group(1)
        #print image_name

        dev_os = re.search(r"\), Version (.+),", output)
        os = dev_os.group(1)
        #print os

        serial_no = ""
        if len(re.findall(r"(.+), SN: (.+?)\r\n", output)) == 0:
            serial_no = "unknown"
        else:
            serial_no = re.findall(r"(.+), SN: (.+?)\r\n",
                                   output)[0][1].strip()
        #print serial_no

        dev_uptime = re.search(r" uptime is (.+)\n", output)
        uptime = dev_uptime.group(1)
        uptime_value_list = uptime.split(', ')

        #Getting the device uptime in seconds
        y_sec = 0
        w_sec = 0
        d_sec = 0
        h_sec = 0
        m_sec = 0
        for j in uptime_value_list:
            if 'year' in j:
                y_sec = int(j.split(' ')[0]) * 31449600
            elif 'week' in j:
                w_sec = int(j.split(' ')[0]) * 604800
            elif 'day' in j:
                d_sec = int(j.split(' ')[0]) * 86400
            elif 'hour' in j:
                h_sec = int(j.split(' ')[0]) * 3600
            elif 'minute' in j:
                m_sec = int(j.split(' ')[0]) * 60
        total_uptime_sec = y_sec + w_sec + d_sec + h_sec + m_sec
        #print total_uptime_sec

        cpu_model = ""
        if re.search(r".isco (.+?) \((.+)\) processor(.+)\n", output) == None:
            cpu_model = "unknown"
        else:
            cpu_model = re.search(r".isco (.+?) \((.+)\) processor(.+)\n",
                                  output).group(2)
        #print cpu_model

        cpu_speed = ""
        if re.search(r"(.+?)at (.+?)MHz(.+)\n", output) == None:
            cpu_speed = "unknown"
        else:
            cpu_speed = re.search(r"(.+?)at (.+?)MHz(.+)\n", output).group(2)
        #print cpu_speed

        serial_int = ""
        if re.findall(r"Serial([0-9]*)/([0-9]*) (.+)\n", output) == None:
            serial_int = "no serial"
        else:
            serial_int = len(
                re.findall(r"Serial([0-9]*)/([0-9]*) (.+)\n", output))
        #print serial_int

        dev_cdp_neighbors = re.findall(r"Device ID: (.+)\r\n", output)
        all_cdp_neighbors = ','.join(dev_cdp_neighbors)
        #print all_cdp_neighbors

        dev_routing_pro = re.findall(r"Routing Protocol is \"(.+)\"\r\n",
                                     output)
        #print dev_routing_pro
        is_internal = []
        is_external = []
        for protocol in dev_routing_pro:
            if 'bgp' in protocol:
                is_external.append(protocol)
            else:
                is_internal.append(protocol)

        internal_pro = ','.join(is_internal)
        external_pro = ','.join(is_external)
        #print internal_pro
        #print external_pro

        ############# Application #4 - Part #4 #############

        ### CPU ###
        dev_cpu_util_per5min = re.search(
            r"CPU utilization for five seconds: (.+) five minutes: (.+?)%",
            output)
        cpu_util_per5min = dev_cpu_util_per5min.group(2)
        #print cpu_util_per5min

        #Append CPU value for each device to the cpu_values list
        cpu_values.append(int(cpu_util_per5min))

        #Get top 3 CPU devices
        top3_cpu[hostname] = cpu_util_per5min

        ### Processor Memory ###
        dev_used_proc_mem = re.search(r"Processor(.+)\n ", output)
        dev_used_proc_mem = dev_used_proc_mem.group(1)
        #print dev_used_proc_mem

        total_proc_mem = dev_used_proc_mem.split('   ')[2].strip()
        used_proc_mem = dev_used_proc_mem.split('   ')[3].strip()
        #print total_proc_mem
        #print used_proc_mem

        #Get percentage of used proc mem
        proc_mem_percent = format(
            int(used_proc_mem) * 100 / float(total_proc_mem), ".2f")
        #print proc_mem_percent

        #Append used proc memory values for each device to the mem_values list
        proc_mem_values.append(float(proc_mem_percent))

        #Get top 3 proc memory devices
        top3_proc_mem[hostname] = proc_mem_percent

        ### I/O Memory ###
        dev_used_io_mem = re.search(r" I/O(.+)\n", output)
        dev_used_io_mem = dev_used_io_mem.group(1)
        #print dev_used_io_mem

        total_io_mem = dev_used_io_mem.split('    ')[2].strip()
        used_io_mem = dev_used_io_mem.split('    ')[3].strip()
        #print total_io_mem
        #print used_io_mem

        #Get percentage of used proc mem
        io_mem_percent = format(
            int(used_io_mem) * 100 / float(total_io_mem), ".2f")
        #print io_mem_percent

        #Append used I/O memory values for each device to the mem_values list
        io_mem_values.append(float(io_mem_percent))

        #Get top 3 I/O memory devices
        top3_io_mem[hostname] = io_mem_percent

        ### UP Interfaces ###

        dev_total_int = re.findall(r"([A-Za-z]*)Ethernet([0-9]*)(.+)YES(.+)\n",
                                   output)
        total_int = len(dev_total_int)
        #print total_int
        dev_total_up_int = re.findall(
            r"(.+)Ethernet([0-9]*)/([0-9]*)[\s]*(.+)up[\s]*up", output)
        total_up_int = len(dev_total_up_int)
        #print total_up_int

        #Get percentage of Eth UP interfaces out of the total number of Eth interfaces
        intf_percent = format(total_up_int * 100 / float(total_int), ".2f")
        #print intf_percent

        #Append percentage of UP interfaces for each device to the upint_values list
        upint_values.append(float(intf_percent))

        #Get top 3 UP Eth interfaces density devices
        top3_upint[hostname] = intf_percent

        #Insert/Update if exists all network devices data into the MySQL database table NetworkDevices. Calling sql_connection function
        sql_connection(
            "REPLACE INTO NetworkDevices(Hostname,MACAddr,Vendor,Model,Image,IOSVersion,SerialNo,Uptime,CPUModel,CPUSpeed,SerialIntfNo,CiscoNeighbors,IntRoutingPro,ExtRoutingPro) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
            (hostname, mac, vendor, model, image_name, os, serial_no,
             total_uptime_sec, cpu_model, cpu_speed, serial_int,
             all_cdp_neighbors, internal_pro, external_pro))

        #Closing the SSH connection
        session.close()
    except paramiko.AuthenticationException:
        print Fore.RED + "* Invalid SSH username or password. \n* Please check the username/password file or the device configuration!\n"
        check_sql = False
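
The uptime arithmetic in the example treats a year as 52 weeks (31,449,600 s) and a week as 604,800 s. The same conversion as a standalone helper, with a quick worked check (uptime_to_seconds is a hypothetical name, written for illustration):

def uptime_to_seconds(uptime):
    """Convert an IOS uptime string such as '1 year, 2 weeks, 3 days, 4 hours, 5 minutes' to seconds."""
    seconds_per = {'year': 31449600, 'week': 604800, 'day': 86400,
                   'hour': 3600, 'minute': 60}
    total = 0
    for part in uptime.split(', '):
        count, unit = part.split(' ')
        total += int(count) * seconds_per[unit.rstrip('s')]
    return total

print(uptime_to_seconds('1 year, 2 weeks, 3 days, 4 hours, 5 minutes'))
# 31449600 + 2*604800 + 3*86400 + 4*3600 + 5*60 = 32933100
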
Exemple #30
0
 def _set_client(self):
     self.client = paramiko.SSHClient()
     self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
     self.client.connect(self.host_ip, username=self.user,
                         password=self.password)
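
Every example on this page calls set_missing_host_key_policy(paramiko.AutoAddPolicy()), which silently trusts any unknown host. Outside of lab setups, a stricter arrangement along these lines is preferable (host name and key paths are placeholders):

import paramiko

client = paramiko.SSHClient()
client.load_system_host_keys()                                 # e.g. /etc/ssh/ssh_known_hosts
client.load_host_keys('/home/user/.ssh/known_hosts')           # placeholder path
client.set_missing_host_key_policy(paramiko.RejectPolicy())    # refuse unknown hosts (paramiko's default)
client.connect('device.example.com', username='admin',
               key_filename='/home/user/.ssh/id_rsa')
client.close()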