예제 #1
0
class Fabapi(object):
    """Thin wrapper around a fabric.Connection to a single remote host,
    authenticated with the deploy key from settings."""

    def __init__(self, hostip):
        connect_kwargs = {
            'pkey': settings.DEPLOYKEY,
            # 'password':''
        }
        # BUG FIX: the original expanded this dict with **connect_kwargs,
        # which passed `pkey` as a direct keyword to Connection() —
        # Connection does not accept `pkey`; it must go inside the
        # `connect_kwargs` dict that is forwarded to paramiko.
        self.c = Connection(host=hostip,
                            user=settings.DEPLOYUSER,
                            port=22,
                            connect_kwargs=connect_kwargs)

    def locald(self, ld):
        """Run command *ld* on the local machine."""
        return self.c.local(ld)

    def remoted(self, rd, sudoif=0):
        """Run command *rd* on the remote host.

        `sudoif` is kept for interface compatibility; sudo execution is
        currently disabled (see commented-out branch).
        """
        # if sudoif == 1:
        #     return sudo(rd, pty=False)
        # else:
        #     return run(rd)
        return self.c.run(rd, pty=False, shell=False)

    def getfile(self, local_dest, remote_dest):
        """Download *remote_dest* from the host to *local_dest*."""
        self.c.get(remote_dest, local_dest)

    def putfile(self, local_dest, remote_dest):
        """Upload *local_dest* to *remote_dest* with mode 0o664."""
        self.c.put(local_dest, remote_dest, mode=0o664)

    def isexists(self, rootdir):
        """Return 1 if *rootdir* exists on the remote host, else 0."""
        result = self.c.run(" [ -e {0} ] && echo 1 || echo 0".format(rootdir),
                            shell=False)
        # BUG FIX: fabric's Result object is not convertible with int();
        # the flag echoed by the shell must be parsed from stdout.
        return int(result.stdout)
예제 #2
0
def test_4_fabric_download():
    """Round-trip check: download a file over SFTP and verify its first line.

    Assumes basename(remotepath) == testfilename, since get() with no local
    argument writes the remote file's basename into the current directory.
    """
    # Idiom fix: a conditional expression used purely for its side effect
    # is replaced with a plain guard statement.
    if isfile(testfilename):
        remove(testfilename)
    c = Connection(host=hostname, user=username,
                   connect_kwargs={'password': password})
    c.get(remotepath)
    # Leak fix: close the downloaded file via a context manager.
    with open(testfilename) as fh:
        assert fh.readline() == 'testline'
    c.run(f'rm {remotepath}')
    remove(testfilename)
예제 #3
0
def add_new_node_ssh():
    """Register new nodes in /etc/hosts and distribute SSH keys so every
    node in the (module-level) `nodes` map can reach every other node."""
    nodes_new = {'172.17.0.12': '111111'}
    node_num = 3
    with open('/etc/hosts', 'a') as f:
        # BUG FIX: the loop iterated over undefined `node_new` (NameError);
        # it must iterate the `nodes_new` dict defined above.
        for ip, pwd in nodes_new.items():
            # BUG FIX: append a newline so each host gets its own line
            # in /etc/hosts instead of all entries running together.
            f.write('%s    node%s\n' % (ip, node_num))
            node_num += 1
            c = Connection(ip,
                           port=22,
                           user='******',
                           connect_kwargs={'password': pwd})
            # Collect the new node's public key and merge it into the
            # local authorized_keys file.
            c.get('/root/.ssh/id_rsa.pub', '/root/.ssh/id_rsa.pub.bak')
            c.local(
                'cat /root/.ssh/id_rsa.pub.bak >> /root/.ssh/authorized_keys')
            c.local('rm -f /root/.ssh/id_rsa.pub.bak')
    nodes.update(nodes_new)
    # Push the updated hosts file and merged authorized_keys to every node.
    for ip, pwd in nodes.items():
        c = Connection(ip,
                       port=22,
                       user='******',
                       connect_kwargs={'password': pwd})
        c.run('rm -f /etc/hosts')
        c.put('/etc/hosts', '/etc/hosts')
        a = c.local('find /root/.ssh/ -name authorized_keys')
        if a.stdout.find('authorized_keys') != -1:
            c.run('rm -f /root/.ssh/authorized_keys')
        c.put('/root/.ssh/authorized_keys', '/root/.ssh/authorized_keys')
    print('over')
예제 #4
0
def do_download(connection: Connection, download: dict, backup_filename: str,
                remote_zip_path: str, remote_zip_md5: str) -> None:
    """Fetch the remote backup archive locally and verify its md5 checksum.

    Does nothing unless download["enable"] is truthy; optionally deletes the
    remote archive afterwards when download["remove"] is set.
    """
    if not download["enable"]:
        return

    print("==> downloading compressed file to local directory")

    # Absolute local destination path for the archive.
    local_zip_path = path.realpath(
        "{}/{}".format(download["path"], backup_filename))

    # Pull the compressed file down from the remote host.
    connection.get(remote_zip_path, local_zip_path)

    # md5sum prints "<digest>  <file>"; keep only the digest field.
    result = connection.local("md5sum {}".format(local_zip_path), hide=True)
    local_zip_md5 = result.stdout.split(" ")[0]

    if local_zip_md5 == remote_zip_md5:
        print("==> sum is match")
    else:
        print("==> sum is not match")

    if download["remove"]:
        print("==> removing compressed file")
        connection.run("rm -f {}".format(remote_zip_path))
예제 #5
0
def createKeys(user, publicIPs, destinationPath, keyFile):
    """Generate (if needed) and collect SSH keys from each instance.

    For every IP in *publicIPs*: ensure an id_rsa keypair exists remotely,
    download its authorized_keys and id_rsa.pub into *destinationPath*, and
    append both into the aggregate keys/authorized_keys_updated file.
    """
    print("\nCreate the keys for each instance")
    # Remove stale output files left over from a previous run.
    for stale in ('keys/id_rsa.pub', 'keys/authorized_keys',
                  'keys/authorized_keys_updated'):
        if os.path.exists(stale):
            os.remove(stale)

    for IPAddress in publicIPs:
        # BUG FIX: `print IPAddress` is Python-2 syntax (SyntaxError on py3).
        print(IPAddress)
        connection = Connection(host=IPAddress, user=user,
                                connect_kwargs={'key_filename': [keyFile]})

        if checkFileExists(user, IPAddress, keyFile,
                           '/home/' + user + '/.ssh/id_rsa.pub'):
            print("Key already exists for {}".format(IPAddress))
        else:
            # Non-interactive keygen: pipe an empty answer to the prompt.
            connection.run("echo -ne '\n' | ssh-keygen -t rsa")

        connection.get('/home/' + user + '/.ssh/authorized_keys',
                       local=destinationPath + 'authorized_keys')
        connection.get('/home/' + user + '/.ssh/id_rsa.pub',
                       local=destinationPath + 'id_rsa.pub')

        # Merge both downloaded key files into the aggregate file.
        # LEAK FIX: the original never closed any of these three handles;
        # context managers guarantee closure.
        with open('keys/id_rsa.pub') as pub, \
                open('keys/authorized_keys') as auth, \
                open('keys/authorized_keys_updated', 'a') as merged:
            merged.writelines(pub)
            merged.writelines(auth)
예제 #6
0
def _update_settings(c: Connection, source_folder, sitename):
    """Pull the remote Django settings, harden them (DEBUG off, host pinned,
    SECRET_KEY externalised into secret_key.py) and push the result back."""
    remote_settings = source_folder + '/superlists/settings.py'
    remote_secret = source_folder + '/superlists/secret_key.py'
    workdir = 'tmp_remote_settings'
    new_settings = workdir + '/settings.py'
    old_settings = workdir + '/settings_old.py'
    secret_file = workdir + '/secret_key.py'
    os.mkdir(workdir)
    try:
        # Download the live settings and keep them under an _old name.
        c.get(remote_settings,
              local=os.getcwd() + '/' + workdir + '/settings.py')
        os.rename(workdir + '/settings.py', old_settings)
        with open(old_settings, 'r') as src:
            text = src.read()
        # Apply the three hardening edits.
        text = re.sub("DEBUG = True", "DEBUG = False", text)
        text = re.sub(r'ALLOWED_HOSTS = \[.*\]',
                      'ALLOWED_HOSTS = ["{}"]'.format(sitename), text)
        text = re.sub(r"SECRET_KEY = '.*'",
                      'from .secret_key import SECRET_KEY', text)
        with open(new_settings, 'w') as dst:
            dst.write(text)
        # Generate and upload a secret key only if none exists remotely.
        if not _exists(c, remote_secret):
            alphabet = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
            # 50 characters drawn from a CSPRNG.
            key = ''.join(random.SystemRandom().choice(alphabet)
                          for _ in range(50))
            with open(secret_file, 'w') as keyfile:
                keyfile.write('SECRET_KEY = "{}"'.format(key))
            c.put(os.getcwd() + '/' + secret_file,
                  remote=source_folder + '/superlists/')
        c.put(os.getcwd() + '/' + new_settings,
              remote=source_folder + '/superlists/')
    finally:
        # Always clean up the scratch directory, even on failure.
        shutil.rmtree(workdir)
예제 #7
0
def backup(connection: fabric.Connection, filename: str) -> None:
    """Download a timestamped backup copy of a project file.

    The local copy is named backup_<name>_<date><extension>.
    """
    current_date = datetime.datetime.now().strftime(
        src.constants.GENERIC_DATE_FORMAT)
    name, extension = os.path.splitext(filename)

    # NOTE(review): the remote path segment '(unknown)' looks like an
    # extraction placeholder, not a real filename — confirm the intended
    # remote file (presumably `filename` itself).
    with connection.cd(GlobalConfig.project_name):
        connection.get(f'{GlobalConfig.project_name}/(unknown)',
                       f'backup_{name}_{current_date}{extension}')
예제 #8
0
 def get_licence(self):
     """Build license.ini from the bitt ID, upload it to the licence host,
     encrypt it there with openssl, and download the license_info blob.

     Returns the caught exception object if Connection() raises (kept for
     interface compatibility with the original).
     """
     self.get_bittid()
     try:
         connect = Connection(host=self.licence_host, port=self.licence_port,
                              user=self.licence_user,
                              connect_kwargs={'password': self.licence_password})
     except Exception as e:
         # NOTE(review): Connection() does not connect eagerly, so this
         # branch rarely fires; returning the exception mirrors the
         # original contract rather than raising.
         return e
     ini_file_path = self.config.get('path_to_save_licence') + 'license.ini'
     line = self.licence_line.format(self.bittID, self.licence_date)
     # LEAK FIX: use a context manager so the handle is closed even if the
     # write fails (original used open/write/close without try/finally).
     with open(ini_file_path, 'w') as f:
         f.writelines(line)
     connect.put(ini_file_path, self.licence_path + 'license.ini')
     connect.run("cd %s && openssl rsautl -encrypt -in license.ini -inkey test_pub.key -pubin -out license_info" % self.licence_path)
     connect.get(self.licence_path + 'license_info',
                 self.config.get('path_to_save_licence') + 'license_info')
예제 #9
0
def get_logs(key, host_addr, group):
    """Collect log archives from each host in *host_addr*.

    For every row ({'student_id', 'ip_address'}) the remote /var/log/script
    tree and .command_history are pulled into a local per-student backup
    directory /root/log/<group>/<id>/<epoch>. Hosts that time out within
    connect_timeout are skipped silently.
    """
    str_date = datetime.now().strftime('%s')
    for row in host_addr:
        # Offset the student id by the group number so ids are globally
        # unique across groups. (Idiom fix: original shadowed the builtin
        # `id` and used C-style (int)/(str) casts.)
        student_id = str(int(row['student_id']) + int(group) * len(host_addr))
        host = row['ip_address']

        try:
            print("Start get_logs function")
            c = Connection(host=host,
                           user="******",
                           port=22,
                           connect_timeout=2,
                           connect_kwargs={"key_filename": key})
            print("Success connection host: " + host)

            backup_dir = ("/root/log/" + group + "/" + student_id.zfill(3)
                          + "/" + str_date)
            c.local("mkdir -p " + backup_dir, warn=True)
            print("Create backup_dir locally: " + backup_dir)

            c.run("tar czf /root/script.tar.gz -C /var/log/ script", warn=True)
            print("Create script.tar.gz on remote")

            c.get("/root/script.tar.gz", backup_dir + "/script.tar.gz")
            print("Get script.tar.gz on remote")

            c.run("rm -rf /root/script.tar.gz", warn=True)
            print("Delete script.tar.gz on remote")

            c.get("/root/.command_history", backup_dir + "/.command_history")
            print("Get .command_history on remote")

            print("Finish get_logs function")
        except socket.timeout:
            # Unreachable host within connect_timeout — skip it.
            continue
예제 #10
0
    def _logs(self, hosts):
        """Wipe stale local logs, download fresh node/client logs from every
        host, and return the parsed performance results."""
        # Delete local logs (if any); errors from the shell are suppressed.
        subprocess.run([CommandMaker.clean_logs()], shell=True,
                       stderr=subprocess.DEVNULL)

        # Download the per-node and per-client log files, one host at a time.
        for idx, host in enumerate(
                progress_bar(hosts, prefix='Downloading logs:')):
            conn = Connection(host, user='******', connect_kwargs=self.connect)
            conn.get(PathMaker.node_log_file(idx),
                     local=PathMaker.node_log_file(idx))
            conn.get(PathMaker.client_log_file(idx),
                     local=PathMaker.client_log_file(idx))

        # Parse logs and return the parser.
        Print.info('Parsing logs and computing performance...')
        return LogParser.process(PathMaker.logs_path())
def stop_vid(request):
    """Stop the remote `motion` capture on the Pi; when recording was active
    (global a == 1), download the captured video and attach it to the user.

    NOTE(review): depends on module-level globals `a`, `pi_ip`,
    `filename_global` and `f` assigned elsewhere — confirm their lifecycle
    before refactoring.
    """
    print('a=', a)
    # Give motion time to finalise the current video file before killing it.
    sleep(10)
    cmd = " sudo pkill motion"
    c = Connection(host=pi_ip, user='******', connect_kwargs={'password': '******'})
    c.run(cmd)
    if a == 1:
        var = UserVids(author=request.user, postdate=timezone.now(),session=request.user.logged_in_user.session_key)
        os.system("mkdir ./runcode/data/videos/"+filename_global)
        # Fetch the finished .mp4 from the Pi into the local videos folder.
        f2save = c.get('/home/pi/runcode/data/videos/' + filename_global +'/' + f + '.mp4','./runcode/data/videos/'+filename_global+'/'+f + '.mp4')
        fopen = open('./runcode/data/videos/' + filename_global +'/' + f + '.mp4', 'rb')
        var.uservid.save('videos/'+filename_global+'/'+ f + '.mp4', File(fopen))
        # Remove the video directory on the Pi (password piped to sudo).
        cmd2 = "echo rpi | sudo -S rm -rf ./runcode/data/videos"
        c.run(cmd2)
        c.close()
        # Run the stop script on the Pi via sshpass/ssh as a subprocess.
        cmd2 = " python3 /home/pi/runcode/stopit.py"
        p = subprocess.Popen("sshpass -p rpi ssh -p22 pi@" + pi_ip + cmd2,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        p.communicate()
        #os.remove(os.getcwd() + ')
        #return HttpResponseRedirect('/runcode/')
        return redirect('/runcode')
    else:
        #return HttpResponseRedirect('/runcode/')
        return redirect('/runcode')
예제 #12
0
    class get:
        """Tests for `Connection.get` (nose-style `ok_`/`eq_` assertions)."""

        def setup(self):
            # Connect to localhost; the fixture file lives in the support dir.
            self.c = Connection('localhost')
            self.remote = self._support('file.txt')

        def base_case(self):
            # Copy file from support to tempdir
            # TODO: consider path.py for contextmanager
            cwd = os.getcwd()
            os.chdir(self.tmpdir)
            try:
                # With no `local` argument, get() writes into the CWD.
                result = self.c.get(self.remote)
            finally:
                os.chdir(cwd)

            # Make sure it arrived
            local = self._tmp('file.txt')
            ok_(os.path.exists(local))
            eq_(open(local).read(), "yup\n")
            # Sanity check result object
            eq_(result.remote, self.remote)
            eq_(result.orig_remote, self.remote)
            eq_(result.local, local)
            eq_(result.orig_local, None)

        def file_like_objects(self):
            # A file-like `local` target receives the bytes directly.
            fd = BytesIO()
            result = self.c.get(remote=self.remote, local=fd)
            eq_(fd.getvalue(), b"yup\n")
            eq_(result.remote, self.remote)
            ok_(result.local is fd)

        def mode_preservation(self):
            # Use a dummy file which is given an unusual, highly unlikely to be
            # default umask, set of permissions (oct 641, aka -rw-r----x)
            local = self._tmp('funky-local.txt')
            remote = self._tmp('funky-remote.txt')
            with open(remote, 'w') as fd:
                fd.write('whatever')
            os.chmod(remote, 0o641)
            self.c.get(remote=remote, local=local)
            eq_(stat.S_IMODE(os.stat(local).st_mode), 0o641)
예제 #13
0
    class get:
        """Tests for `Connection.get` (duplicate of the earlier nose-style
        suite; `ok_`/`eq_` assertions)."""

        def setup(self):
            # Connect to localhost; the fixture file lives in the support dir.
            self.c = Connection('localhost')
            self.remote = self._support('file.txt')

        def base_case(self):
            # Copy file from support to tempdir
            # TODO: consider path.py for contextmanager
            cwd = os.getcwd()
            os.chdir(self.tmpdir)
            try:
                # With no `local` argument, get() writes into the CWD.
                result = self.c.get(self.remote)
            finally:
                os.chdir(cwd)

            # Make sure it arrived
            local = self._tmp('file.txt')
            ok_(os.path.exists(local))
            eq_(open(local).read(), "yup\n")
            # Sanity check result object
            eq_(result.remote, self.remote)
            eq_(result.orig_remote, self.remote)
            eq_(result.local, local)
            eq_(result.orig_local, None)

        def file_like_objects(self):
            # A file-like `local` target receives the bytes directly.
            fd = BytesIO()
            result = self.c.get(remote=self.remote, local=fd)
            eq_(fd.getvalue(), b"yup\n")
            eq_(result.remote, self.remote)
            ok_(result.local is fd)

        def mode_preservation(self):
            # Use a dummy file which is given an unusual, highly unlikely to be
            # default umask, set of permissions (oct 641, aka -rw-r----x)
            local = self._tmp('funky-local.txt')
            remote = self._tmp('funky-remote.txt')
            with open(remote, 'w') as fd:
                fd.write('whatever')
            os.chmod(remote, 0o641)
            self.c.get(remote=remote, local=local)
            eq_(stat.S_IMODE(os.stat(local).st_mode), 0o641)
예제 #14
0
파일: fabfile.py 프로젝트: anwen/anwen
def backup(c):
    """Backup data from aw mongo and download."""
    # BUG FIX: the docstring above was originally placed AFTER the first
    # statement, making it a dead string expression instead of a docstring.
    # NOTE(review): the incoming task connection is replaced by a fresh
    # Connection to host alias 'aw' — confirm this is intentional.
    c = Connection('aw')
    with c.cd('/var/www/anwen/db'):
        c.run('. ~/.zshrc && python3 db_in_out.py -o')
        c.run('tar czf aw_yaml.tar.gz data')
    with c.cd('/var/www/anwen/docs/shares'):
        c.run('tar czf aw_md.tar.gz *.md')
    with c.cd('/var/www/anwen/static/upload/'):
        c.run('tar czf upload.tar.gz img')
    print('download yaml:')
    with CD(os.path.join(os.getcwd(), 'db/')):
        c.get('/var/www/anwen/db/aw_yaml.tar.gz', 'aw_yaml.tar.gz')
        c.local('tar zxf aw_yaml.tar.gz')
        c.local('rm aw_yaml.tar.gz')
    print('download md:')
    with CD(os.path.join(os.getcwd(), 'docs/shares/')):
        c.get('/var/www/anwen/docs/shares/aw_md.tar.gz', 'aw_md.tar.gz')
        c.local('tar zxf aw_md.tar.gz')
        c.local('rm aw_md.tar.gz')
    print('download img:')
    # NOTE(review): the bare return below makes the img download dead code —
    # the upload.tar.gz archive is created remotely but never fetched.
    # Kept as-is in case the skip is deliberate; confirm and either remove
    # the return or delete the unreachable block.
    return
    with CD(os.path.join(os.getcwd(), 'static/upload/')):
        c.get('/var/www/anwen/static/upload/upload.tar.gz', 'upload.tar.gz')
        c.local('tar zxf upload.tar.gz img')
        c.local('rm upload.tar.gz')
예제 #15
0
파일: fabfile.py 프로젝트: anwen/anwen
def backup(c):
    # NOTE(review): `c` is immediately replaced by a fresh Connection to
    # host alias 'aw' — the passed-in task connection is ignored.
    c = Connection('aw')
    # NOTE(review): the string below is NOT a docstring — it follows a
    # statement, so it is a no-op expression; move it above the assignment
    # to make it a real docstring.
    """ backup data from aw mongo """
    # Create the three archives (db yaml dump, markdown shares, upload imgs).
    with c.cd('/var/www/anwen/db'):
        c.run('. ~/.zshrc && python3 db_in_out.py -o')
        c.run('tar czf aw_yaml.tar.gz data')
    with c.cd('/var/www/anwen/docs/shares'):
        c.run('tar czf aw_md.tar.gz *.md')
    with c.cd('/var/www/anwen/static/upload/'):
        c.run('tar czf upload.tar.gz img')
    print('download yaml:')
    with CD(os.path.join(os.getcwd(), 'db/')):
        c.get('/var/www/anwen/db/aw_yaml.tar.gz', 'aw_yaml.tar.gz')
        c.local('tar zxf aw_yaml.tar.gz')
        c.local('rm aw_yaml.tar.gz')
    print('download md:')
    with CD(os.path.join(os.getcwd(), 'docs/shares/')):
        c.get('/var/www/anwen/docs/shares/aw_md.tar.gz', 'aw_md.tar.gz')
        c.local('tar zxf aw_md.tar.gz')
        c.local('rm aw_md.tar.gz')
    print('download img:')
    # NOTE(review): everything after this bare return is unreachable dead
    # code — the img archive is created remotely but never downloaded.
    return
    with CD(os.path.join(os.getcwd(), 'static/upload/')):
        c.get('/var/www/anwen/static/upload/upload.tar.gz', 'upload.tar.gz')
        c.local('tar zxf upload.tar.gz img')
        c.local('rm upload.tar.gz')
예제 #16
0
def backup():
    '''
    Back up the remote MySQL database and download the archive.
    '''
    # sudo password for the remote user.
    config = Config(overrides={'sudo': {'password': remote_su_pass}})
    # Password login in plain text; the SSH agent is disabled.
    conn = Connection(ip,
                      user=remote_user,
                      config=config,
                      connect_kwargs={
                          "allow_agent": False,
                          "password": remote_pass
                      })
    # Timestamped dump name, e.g. backup-24-05-01_12.00.00.sql
    dump_name = 'backup-%s.sql' % datetime.now().strftime('%y-%m-%d_%H.%M.%S')
    with conn.cd('/tmp'):
        # Dump, compress, download, then remove both remote artifacts.
        conn.run(
            'mysqldump   --user={} --password=\'{}\' --single-transaction --routines --triggers --events  --skip-extended-insert {}>{}'
            .format(remote_sql_user, remote_sql_pass, remote_sql_db, dump_name))
        conn.run('tar -czvf %s.tar.gz %s' % (dump_name, dump_name))
        conn.get('/tmp/%s.tar.gz' % dump_name, 'backup/%s.tar.gz' % dump_name)
        conn.run('rm -f %s' % dump_name)
        conn.run('rm -f %s.tar.gz' % dump_name)
예제 #17
0
    class get:
        """Tests for `Connection.get` (pytest-style asserts, `tmpdir` fixture)."""

        def setup(self):
            # Connect to localhost; the fixture file lives in the support dir.
            self.c = Connection("localhost")
            self.remote = _support("file.txt")

        def base_case(self, tmpdir):
            # Copy file from support to tempdir
            with tmpdir.as_cwd():
                # With no `local` argument, get() writes into the CWD.
                result = self.c.get(self.remote)

            # Make sure it arrived
            local = tmpdir.join("file.txt")
            assert local.check()
            assert local.read() == "yup\n"
            # Sanity check result object
            assert result.remote == self.remote
            assert result.orig_remote == self.remote
            assert result.local == str(local)
            assert result.orig_local is None

        def file_like_objects(self):
            # A file-like `local` target receives the bytes directly.
            fd = BytesIO()
            result = self.c.get(remote=self.remote, local=fd)
            assert fd.getvalue() == b"yup\n"
            assert result.remote == self.remote
            assert result.local is fd

        def mode_preservation(self, tmpdir):
            # Use a dummy file which is given an unusual, highly unlikely to be
            # default umask, set of permissions (oct 641, aka -rw-r----x)
            local = tmpdir.join("funky-local.txt")
            remote = tmpdir.join("funky-remote.txt")
            remote.write("whatever")
            remote.chmod(0o641)
            self.c.get(remote=str(remote), local=str(local))
            assert stat.S_IMODE(local.stat().mode) == 0o641
예제 #18
0
파일: transfer.py 프로젝트: fabric/fabric
    class get:
        """Tests for `Connection.get` (duplicate of the earlier pytest-style
        suite; `tmpdir` fixture)."""

        def setup(self):
            # Connect to localhost; the fixture file lives in the support dir.
            self.c = Connection("localhost")
            self.remote = _support("file.txt")

        def base_case(self, tmpdir):
            # Copy file from support to tempdir
            with tmpdir.as_cwd():
                # With no `local` argument, get() writes into the CWD.
                result = self.c.get(self.remote)

            # Make sure it arrived
            local = tmpdir.join("file.txt")
            assert local.check()
            assert local.read() == "yup\n"
            # Sanity check result object
            assert result.remote == self.remote
            assert result.orig_remote == self.remote
            assert result.local == str(local)
            assert result.orig_local is None

        def file_like_objects(self):
            # A file-like `local` target receives the bytes directly.
            fd = BytesIO()
            result = self.c.get(remote=self.remote, local=fd)
            assert fd.getvalue() == b"yup\n"
            assert result.remote == self.remote
            assert result.local is fd

        def mode_preservation(self, tmpdir):
            # Use a dummy file which is given an unusual, highly unlikely to be
            # default umask, set of permissions (oct 641, aka -rw-r----x)
            local = tmpdir.join("funky-local.txt")
            remote = tmpdir.join("funky-remote.txt")
            remote.write("whatever")
            remote.chmod(0o641)
            self.c.get(remote=str(remote), local=str(local))
            assert stat.S_IMODE(local.stat().mode) == 0o641
예제 #19
0
def runfile(put_get, user, pwd, ip, path1, path2):
    """Transfer a file to ("put") or from (any other value) host *ip*.

    For "put", path1 is the local source and path2 the remote destination;
    otherwise path1 is the remote source and path2 the local destination.
    Failures are reported on stdout rather than raised.
    """
    c = Connection(host=ip,
                   user=user,
                   connect_kwargs={'password': pwd},
                   connect_timeout=120)
    if put_get == "put":
        try:
            # BUG FIX throughout: `print '...'` is Python-2 syntax and is a
            # SyntaxError under Python 3 (the rest of this file is py3).
            c.put(path1, path2)
            print('{} ------> put done'.format(ip))
        except Exception as e:
            print('{} ------> {} put faild'.format(ip, e))
    else:
        try:
            c.get(path1, path2)
            print('{} ------> get done'.format(ip))
        except Exception as e:
            print('{} ------> {} get faild'.format(ip, e))
예제 #20
0
def get_rdms_from_neci(iter, job_folder):
    """Copy the two-particle RDM files and NECI output for iteration *iter*
    from the remote NECI work directory into the local MOLCAS work
    directory, then archive them remotely under Iter_<iter>.

    NOTE(review): the parameter name `iter` shadows the builtin.
    """
    from fabric import Connection
    remote_ip = os.getenv('REMOTE_MACHINE_IP')
    user = os.getenv('USER')
    c = Connection(remote_ip, user=user)

    molcas_WorkDir = os.getenv('MOLCAS_WorkDir')
    remote_WorkDir = os.getenv('REMOTE_NECI_WorkDir')
    neci_WorkDir = remote_WorkDir + job_folder + '/'
    print('Copying RDMs and NECI output from')
    print(neci_WorkDir)
    print(' to ')
    print(molcas_WorkDir)
    # Download the six spin-resolved two-particle RDM files and the output.
    c.get(neci_WorkDir + 'TwoRDM_aaaa.1',
          local=molcas_WorkDir + 'TwoRDM_aaaa.1')  # ,local=molcas_WorkDir)
    c.get(neci_WorkDir + 'TwoRDM_abab.1',
          local=molcas_WorkDir + 'TwoRDM_abab.1')
    c.get(neci_WorkDir + 'TwoRDM_abba.1',
          local=molcas_WorkDir + 'TwoRDM_abba.1')
    c.get(neci_WorkDir + 'TwoRDM_bbbb.1',
          local=molcas_WorkDir + 'TwoRDM_bbbb.1')
    c.get(neci_WorkDir + 'TwoRDM_baba.1',
          local=molcas_WorkDir + 'TwoRDM_baba.1')
    c.get(neci_WorkDir + 'TwoRDM_baab.1',
          local=molcas_WorkDir + 'TwoRDM_baab.1')
    c.get(neci_WorkDir + 'out', local=molcas_WorkDir + 'neci.out')
    # iter=0
    # Archive everything for this iteration on the remote side.
    with c.cd(neci_WorkDir):
        iter_folder = 'Iter_' + str(iter)
        c.run('mkdir {0}'.format(iter_folder))
        c.run('mv TwoRDM* {0}'.format(iter_folder))
        c.run('mv out {0}/neci.out'.format(iter_folder))
        c.run('cp FCIMCStats {0}/.'.format(iter_folder))
        # NOTE(review): the second format argument is unused ({0} appears
        # twice, so only one argument is needed) — harmless.
        c.run('tar -cf {0}.tar.gz {0}'.format(iter_folder, iter_folder))
    c.get(neci_WorkDir + iter_folder + '.tar.gz',
          local=molcas_WorkDir + iter_folder + '.tar.gz')
    # c.run('rm -r {0}'.format(neci_WorkDir))
    c.close()
예제 #21
0
class Remote(object):
    """SSH helper wrapping fabric.Connection with optional jump-host
    (gateway) support.

    The last fabric Result is stored in self.result; most methods return
    True/False instead of raising.
    """

    def __init__(self, env):
        # `env` must provide user/password plus gateway/address or host.
        # When a gateway is configured, tunnel the main connection through it.
        if env.gateway != None:
            self.gatewayConn = Connection(
                env.gateway,
                user=env.user,
                connect_kwargs={"password": env.password})
            self.conn = Connection(env.address,
                                   user=env.user,
                                   gateway=self.gatewayConn,
                                   connect_kwargs={"password": env.password})
        else:
            self.conn = Connection(env.host,
                                   user=env.user,
                                   connect_kwargs={"password": env.password})
        # print('\n'.join(['%s:%s' % item for item in self.conn.__dict__.items()]))
        self.result = ""

    def __del__(self):
        pass

    def getResult(self):
        # Last fabric Result object ("" before any command has been run).
        return self.result

    def mkdir(self, dir_remote):
        # Check whether the remote directory exists; try to create it if not.
        if not self.checkpath(dir_remote):
            print("ready to mkdir: %s ..." % dir_remote)
            if self.conn.run("mkdir --mode=755 -p %s" % dir_remote).failed:
                return False

        return True

    def checkpath(self, filename):
        # print("check remote path %s ..." % filename)
        # Check whether the given remote file or directory exists.
        if fab_exists(self.conn, filename) == True:
            # print("remote file '%s' exists ..." % filename)
            return True
        else:
            return False

    def rmfile(self, filename):
        # Delete the given remote file or directory; the root dir is refused.
        if filename == "/":
            return False

        if self.checkpath(filename):
            if self.conn.sudo("rm -rf %s" % filename).failed:
                return False
        return True

    def cleanup(self, dir_remote):
        # Empty the remote directory's contents; "" and "/" are refused.
        if (dir_remote == "" or dir_remote == "/"
                or self.checkpath(dir_remote) == False):
            return False

        if self.conn.sudo("rm -rf %s" % (dir_remote + "/*")).failed:
            return False

        return True

    def cleanupLocal(self, dir_local, sudo=False):
        # Empty a LOCAL directory's contents.
        # NOTE(review): the guard calls checkpath(), which tests the path on
        # the REMOTE host — confirm this is intended for a local cleanup.
        if (dir_local == "" or dir_local == "/"
                or self.checkpath(dir_local) == False):
            return False

        cmdLocal = "rm -rf %s" % (dir_local + "/*")

        if sudo == True:
            cmdLocal = "sudo -s " + cmdLocal

        if self.conn.local(cmdLocal).failed:
            return False

        return True

    # def rename(self, file_src, file_des):
    #     # Rename the given remote file or directory
    #     with settings(warn_only=True):
    #         if exists(file_src, True):
    #             sudo("mv %s %s" % (file_src, file_des))

    def upload(self, dir_local, dir_remote, force=False):
        # Upload a file to the remote host; skipped when the remote copy
        # already exists unless force=True. Returns True/False.
        print("ready to upload file: %s" % dir_local)

        result_check = self.mkdir(dir_remote)
        if result_check == False:
            print("failed to mkdir: %s" % dir_remote)

        file_name = os.path.basename(dir_local)
        file_remote = dir_remote + file_name

        if self.checkpath(file_remote) == True and force == False:
            print("remote file exists, skip upload ...")
            return True

        try:
            self.conn.put(dir_local, dir_remote)
        except Exception:
            print("Exception: exception occurred when upload file ...")
            print(traceback.print_exc())
            return False

        # print '\n'.join(['%s:%s' % item for item in result.__dict__.items()])
        # print(result)
        # if result.failed:
        # abort("Aborting file upload task, task failed!")
        # else:
        print("end to upload.")
        # sudo("ls -l %s" % dir_remote)
        # list files
        # self.conn.sudo("ls -l %s" % dir_remote)
        return True

    def download(self, dir_remote, dir_local):
        # Download a file from the remote host into dir_local; skipped when
        # the local copy already exists. Returns True/False.
        print("ready to download file: %s " % dir_remote)

        if not exists(dir_local):
            print("ready to mkdir: %s ..." % dir_local)
            if self.conn.local("mkdir --mode=755 -p %s" % dir_local).failed:
                print("Error: failed to mkdir on local machine, path: %s" %
                      dir_local)
                return False

        baseName = os.path.basename(dir_remote)
        fileLocal = dir_local + baseName
        if exists(fileLocal):
            print("local file exists, skip download ...")
            return True

        isOk = True

        # NOTE(review): if open() itself raises, `f` is unbound and the
        # finally block raises NameError — consider a `with` statement.
        try:
            f = open(fileLocal, 'wb')
            self.result = self.conn.get(dir_remote, f)
        except Exception:
            print("Exception: exception occurred when download file ...")
            print("File: " + dir_remote)
            print(traceback.print_exc())
            isOk = False
        finally:
            if f:
                print("ready to close file: " + fileLocal)
                f.close()

        # if self.result.failed:
        #     print("Error: failed to download file ...")
        #     print("File: "+dir_remote)
        #     return False
        # print('\n'.join(['%s:%s' % item for item in result.__dict__.items()]))

        print("end to download.")
        # list files
        # self.conn.local("ls -l %s" % dir_local)

        return isOk

    def sudo(self, cmd):
        # Run the given command on the remote host with sudo privileges.
        print("start super run ...")
        try:
            self.result = self.conn.sudo("%s" % cmd)
            if self.result.failed:
                print("Error: failed to execute remote command ...")
                print("Command: " + cmd)
                return False
        except Exception:
            print(
                "Exception: exception occurred when executing remote command ..."
            )
            print("Command: " + cmd)
            print(traceback.print_exc())
            return False
        print("end super run.")
        return True

    def run(self, cmd):
        # Run the given command on the remote host as the current user.
        print("start run ...")
        try:
            self.result = self.conn.run("%s" % cmd)
            if self.result.failed:
                print("Error: failed to execute remote command ...")
                print("Command: " + cmd)
                return False
        except Exception:
            print(
                "Exception: exception occurred when executing remote command ..."
            )
            print("Command: " + cmd)
            return False
        print("end run.")
        return True

    def local(self, cmd):
        # Run the given command on the local host.
        print("start run ...")
        self.result = self.conn.local("%s" % cmd)
        if self.result.failed:
            print("Error: failed to execute local command ...")
            return False
        print("end run.")
        return True
예제 #22
0
# Connect to the MySQL host over SSH using the generated key pair.
c = Connection(
    host=mysql_ip,
    user="******",
    connect_kwargs={
        "key_filename": key_pair + ".pem",
    },
)
# sleep(10)

# Export the reviews table to CSV on the remote host, then download it
# (get() with no local argument writes the basename into the CWD).
c.sudo("rm -f /var/lib/mysql-files/sqloutput.csv")
c.run("mysql -u dbds -pdbds  -D dbproj -e " +
      '"SELECT  * FROM reviews INTO OUTFILE ' +
      "'/var/lib/mysql-files/sqloutput.csv' FIELDS TERMINATED BY " +
      "',' LINES TERMINATED BY " + "'" + "\\" + "n" + "'" + ";" + '"')
c.sudo("cp /var/lib/mysql-files/sqloutput.csv data/sqloutput.csv")
c.get("data/sqloutput.csv")

# remove port 22 access
ec2_functions.remove_security_group_permissions(ec2, mysql_groupID,
                                                set_ip_permissions)

# Get MongoDB security group
# BUG FIX: the original called `f.close` without parentheses, so the file
# handle was never closed; a context manager closes it deterministically.
with open("mongo_groupID", "r") as f:
    mongo_groupID = f.read()

# set port 22 access
ec2_functions.set_security_group_permissions(ec2, mongo_groupID,
                                             set_ip_permissions)

# Get MongoDB IP address
예제 #23
0
from fabric import Connection, Config
import os

# Path to the local sources directory, relative to this script.
sources = ('..' + os.path.sep + 'sources' + os.path.sep)
# sudo password handed to fabric for privilege escalation.
config = Config(overrides={'sudo': {'password': f'fabric\n'}})

# Install unzip, upload and unpack the fabric archive on the 'centos' host,
# then fetch two result files back into the local working directory.
c = Connection('centos', config=config)
c.sudo('yum install unzip -y')
c.put(f"{sources + 'fabric-2.5.zip'}", remote=f'/home/{c.user}/')
c.run(f"unzip -o /home/{c.user}/fabric-2.5.zip")
c.run(f"rm -f /home/{c.user}/fabric-2.5.zip")
c.get(f"/home/{c.user}/a.txt")
c.get(f"/home/{c.user}/b.txt")
예제 #24
0
class SSH(object):
    """
    A small convenience wrapper around Fabric that:
        1. Hides a few parameters that are not needed yet.
        2. Picks defaults that are helpful for debugging.
        3. Adds extra (English) docstrings.
    """
    def __init__(
        self,
        host: str,
        port: int,
        username: str,
        password: Optional[str] = None,
        key_file_obj: Optional[IO] = None,
        key_file_path: Optional[Path] = None,
        key_file_passphrase: Optional[str] = None,
    ):
        """
        Usage examples:
        ```python3
        # 1. Log in to the remote host with a password
        ssh_con = SSH("192.168.1.xxx", 22, username="******", password="******")

        # 2. Log in with a private key (key has no passphrase)
        ## 2.1 Give the key file's path
        ssh_con = SSH("192.168.1.xxx", 22, username="******", key_file_path=Path("~/.ssh/id_rsa"))
        ## 2.2 Give an IO object holding the key
        ssh_con = SSH("192.168.1.xxx", 22, username="******", key_file_obj=Path("~/.ssh/id_rsa").open(encoding='utf-8'))
        ssh_con = SSH("192.168.1.xxx", 22, username="******", key_file_obj=StringIO("<private-key-content>"))
        ```
        """
        # Credential precedence: key IO object > key file path > password.
        connect_kwargs = dict()
        if key_file_obj is not None:
            # NOTE(review): assumes an RSA key; other key types would need a
            # different paramiko loader — confirm.
            private_key = paramiko.RSAKey.from_private_key(
                key_file_obj, key_file_passphrase)
            connect_kwargs['pkey'] = private_key
        elif key_file_path is not None:
            connect_kwargs = {
                "key_filename": str(key_file_path.resolve()),
                "passphrase": key_file_passphrase
            }
        elif password is not None:
            connect_kwargs['password'] = password
        else:
            raise KeyError("must given password/pkey/private_key")

        self.conn = Connection(host=host,
                               port=port,
                               user=username,
                               connect_kwargs=connect_kwargs)

    def open(self):
        """Open the connection.
        run/put/get open the connection automatically, but cd does not,
        because cd only mutates local session state.
        """
        return self.conn.open()

    def close(self):
        """Close the connection."""
        return self.conn.close()

    @property
    def is_connected(self):
        # True while the underlying fabric Connection is open.
        return self.conn.is_connected

    def run(self, cmd: str, warn=False, hide=False, echo=True, **kwargs):
        """
        Execute a command on the remote host.

        Usage examples:
        ```python3
        # 1. Run a command, echoing it and its output; raises on failure.
        ssh_con.run("ls -al")
        # 2. Only warn on failure (program keeps running), so the error
        #    case can be handled by hand.
        result = ssh_con.run("ls -al", warn=True)
        if result.return_code != 0:  # command failed
            # handle the failure

        # 3. Pull a docker image, showing its log only when the command fails.
        result = ssh_con.run("docker pull xxxx", hide=True, warn=True)
        if result.return_code != 0:  # failed
            logger.error(result.stdout)  # stdout usually includes stderr here
            # then decide whether to raise
        ```

        ==================
        Careful!!! run/sudo do NOT remember directories changed by `cd` commands!
        To change self.cwd (current working directory) you must use self.cd();
        see that function's docstring for details.

        Official docs: http://docs.pyinvoke.org/en/0.12.1/api/runners.html#invoke.runners.Runner.run
        :param cmd: the command string
        :param warn: non-zero exit raises by default; if True, only emit a
                     warning instead of raising
        :param hide: whether to hide the command's output streams (stdout/stderr)
        :param echo: whether to echo the command being run (recommended; very useful for debugging)
        :param shell: which shell executes the command
        :param encoding: character set
        :return: a Result object whose main attributes are:
            command: the command that was executed
            ok: A boolean equivalent to exited == 0.
            return_code: the command's exit code
            stdout: the command's standard output, a multi-line string
                (remote execution may not distinguish stdout/stderr, in which
                case stdout also contains stderr)
        """
        return self.conn.run(command=cmd,
                             warn=warn,
                             hide=hide,
                             echo=echo,
                             **kwargs)

    def sudo(self, command, **kwargs):
        """Run a command with sudo privileges.
        If a sudo password is configured it is used automatically;
        otherwise the user is prompted on the command line (clearly not
        acceptable for unattended operations).

        Careful!!! run/sudo do NOT remember directories changed by `cd` commands!
        To change self.cwd (current working directory) you must use self.cd();
        see that function's docstring for details.

        """
        return self.conn.sudo(command=command, **kwargs)

    def local(self, *args, **kwargs):
        """Run a command on the local machine."""
        return self.conn.local(*args, **kwargs)

    def cd(self, path: Union[Path, str]):
        """change dir
        self.run()/self.sudo() do not record working-directory changes made by
        `cd` commands; to run several statements under a given path you must
        use self.cd()
        (or manually prefix every run with `cd /home/xxx/xxx`, which is
        obviously not recommended).

        Important! Like open(xxx), this must be used as a `with` context manager.
        Usage:
        ```
        with ssh_conn.cd("/tmp"):
            # do some thing
        ```
        Outside the with block, the cd no longer applies.

        ---
        In effect this prefixes every run/sudo command inside the with
        block with `cd xxx`.
        """
        return self.conn.cd(str(path))

    @property
    def cwd(self):
        """currently work dir
        Defaults to the empty string, which means $HOME.
        """
        return self.conn.cwd

    def get(self,
            remote_file_path: Union[Path, str],
            local: Union[Path, IO, None] = None,
            preserve_mode: bool = True,
            mkdirs=False):
        """
        Fetch a file from the remote host to the local machine.
        :param remote_file_path: path of the file on the remote host
            (`~` is NOT expanded — prefer absolute paths!)
        :param local: local path / file-like object to save to; if omitted,
            the file is placed in the current working directory (os.getcwd())
        :param preserve_mode: keep the file's mode bits (read/write/execute), default True
        :param mkdirs: automatically create missing intermediate directories
        :return: a Result object
        """
        if isinstance(local, Path):
            local_path_parent = local.parent
            if local_path_parent.exists() is False:
                if mkdirs:
                    local_path_parent.mkdir(parents=True)
                else:
                    raise FileNotFoundError(
                        "directory '{}' not exist!".format(local_path_parent))

        return self.conn.get(
            remote=str(remote_file_path),
            local=local,
            preserve_mode=preserve_mode,
        )

    def put(self,
            local: Union[Path, IO],
            remote_file_path: Union[Path, str] = Path("."),
            preserve_mode: bool = True,
            mkdirs=False):
        """
        Transfer a file from the local machine to the remote host.

        :param local: local file path / file-like object
        :param remote_file_path: path on the remote host to store the file under
            (`~` is NOT expanded — prefer absolute paths!);
            defaults to the remote home directory
        :param preserve_mode: keep the file's mode bits (read/write/execute), default True
        :param mkdirs: automatically create missing intermediate directories
        :return: a Result object (note: it lacks attributes such as `ok`)
        """
        if mkdirs:
            parent = Path(remote_file_path).parent
            self.conn.run("mkdir -p '{}'".format(parent))

        return self.conn.put(local=local,
                             remote=Path(remote_file_path).as_posix(),
                             preserve_mode=preserve_mode)

    def put_dir(self,
                local_dir_path: Path,
                remote_path: Union[Path, str] = Path("."),
                preserve_mode: bool = True,
                mkdirs=False):
        """
        Transfer a directory from the local machine to the remote host.

        :param local_dir_path: local directory path
        :param remote_path: path of an ALREADY EXISTING directory on the
            remote host (`~` is NOT expanded — prefer absolute paths!);
            defaults to the remote home directory
        :param preserve_mode: keep mode bits (read/write/execute), default True
        :param mkdirs: automatically create missing intermediate directories
        :return
        """
        try:
            self.conn.run(f"test -d {Path(remote_path).as_posix()}")
        except UnexpectedExit:
            raise RuntimeError(
                "remote_path 必须是一个已经存在的文件夹路径!请给定正确的 remote_path,或者使用默认参数!")

        # Pack the directory into a tar.gz stream, upload it, unpack remotely,
        # then delete the remote archive.
        stream = tar_files(local_dir_path, c_type="gz", get_stream=True)
        tar_name = local_dir_path.resolve().name + ".tar.gz"
        stream.name = tar_name
        self.put(local=stream,
                 remote_file_path=Path(remote_path).as_posix(),
                 preserve_mode=preserve_mode,
                 mkdirs=mkdirs)
        with self.cd(remote_path):
            self.run("tar -ax -f {}".format(tar_name))
            self.run("rm {}".format(tar_name))
예제 #25
0
def check_status():
    """CLI entry point: inspect (or kill) tasks managed by yascheduler.

    Flags:
      -j/--jobs         explicit task ids (default: all queued/running tasks)
      -v/--view         tail each running task's OUTPUT file over SSH
      -o/--convergence  additionally parse convergence data locally
                        (requires -v and the pycrystal package)
      -i/--info         print one metadata line per task
      -k/--kill         pkill the runner process on each task's node (needs -j)
    """
    parser = argparse.ArgumentParser(
        description="Submit task to yascheduler daemon")
    parser.add_argument('-j',
                        '--jobs',
                        required=False,
                        default=None,
                        nargs='*')
    parser.add_argument('-v',
                        '--view',
                        required=False,
                        default=None,
                        nargs='?',
                        type=bool,
                        const=True)
    parser.add_argument('-o',
                        '--convergence',
                        required=False,
                        default=None,
                        nargs='?',
                        type=bool,
                        const=True,
                        help='needs -v option')
    parser.add_argument('-i',
                        '--info',
                        required=False,
                        default=None,
                        nargs='?',
                        type=bool,
                        const=True)
    parser.add_argument('-k',
                        '--kill',
                        required=False,
                        default=None,
                        nargs='?',
                        type=bool,
                        const=True)

    args = parser.parse_args()
    config = ConfigParser()
    config.read(CONFIG_FILE)
    yac = Yascheduler(config)
    # Map internal status codes to human-readable labels.
    statuses = {
        yac.STATUS_TO_DO: "QUEUED",
        yac.STATUS_RUNNING: "RUNNING",
        yac.STATUS_DONE: "FINISHED"
    }
    local_parsing_ready, local_calc_snippet = False, False

    if args.jobs:
        tasks = yac.queue_get_tasks(jobs=args.jobs)
    else:
        tasks = yac.queue_get_tasks(status=(yac.STATUS_RUNNING,
                                            yac.STATUS_TO_DO))

    if args.view or args.kill:
        if not tasks:
            print('NO MATCHING TASKS FOUND')
            return
        # Pick up the first 'yakey*' private key found in the data dir, if any;
        # otherwise fall back to default SSH auth (empty connect_kwargs).
        ssh_custom_key = {}
        for filename in os.listdir(config.get('local', 'data_dir')):
            if not filename.startswith('yakey') or not os.path.isfile(
                    os.path.join(config.get('local', 'data_dir'), filename)):
                continue
            key_path = os.path.join(config.get('local', 'data_dir'), filename)
            pmk_key = RSAKey.from_private_key_file(key_path)
            print('LOADED KEY %s' % key_path)
            ssh_custom_key = {'pkey': pmk_key}
            break

    if args.convergence:
        # Local parsing is optional: silently disabled when pycrystal or
        # numpy is not installed.
        try:
            from pycrystal import CRYSTOUT
            from numpy import nan
            local_parsing_ready = True
        except:
            pass

    if args.view:
        # NOTE(review): SQL is built by string interpolation; task_id values
        # originate from the scheduler DB, but parameterized queries would be safer.
        yac.cursor.execute(
            'SELECT task_id, label, metadata, ip FROM yascheduler_tasks WHERE status=%s AND task_id IN (%s);'
            % (yac.STATUS_RUNNING, ', '.join(
                [str(task['task_id']) for task in tasks])))
        for row in yac.cursor.fetchall():
            print("." * 50 + "ID%s %s at %s@%s:%s" %
                  (row[0], row[1], config.get('remote', 'user'), row[3],
                   row[2]['remote_folder']))
            ssh_conn = SSH_Connection(host=row[3],
                                      user=config.get('remote', 'user'),
                                      connect_kwargs=ssh_custom_key)
            try:
                result = ssh_conn.run('tail -n15 %s/OUTPUT' %
                                      row[2]['remote_folder'],
                                      hide=True)
            except UnexpectedExit:
                # Remote folder no longer exists (task already cleaned up).
                print('OUTDATED TASK, SKIPPING')
            else:
                print(result.stdout)

            if local_parsing_ready:
                # Download the full OUTPUT into a temp file and parse it locally.
                local_calc_snippet = os.path.join(
                    config.get('local', 'data_dir'), 'local_calc_snippet.tmp')
                try:
                    ssh_conn.get(row[2]['remote_folder'] + '/OUTPUT',
                                 local_calc_snippet)
                except IOError as err:
                    continue
                calc = CRYSTOUT(local_calc_snippet)
                output_lines = ''
                if calc.info['convergence']:
                    output_lines += str(calc.info['convergence']) + "\n"
                if calc.info['optgeom']:
                    # One formatted line per optimization step; missing cycle
                    # counts are shown as '^'.
                    for n in range(len(calc.info['optgeom'])):
                        try:
                            ncycles = calc.info['ncycles'][n]
                        except IndexError:
                            ncycles = "^"
                        output_lines += "{:8f}".format(calc.info['optgeom'][n][0] or nan) + "  " + \
                                        "{:8f}".format(calc.info['optgeom'][n][1] or nan) + "  " + \
                                        "{:8f}".format(calc.info['optgeom'][n][2] or nan) + "  " + \
                                        "{:8f}".format(calc.info['optgeom'][n][3] or nan) + "  " + \
                                        "E={:12f}".format(calc.info['optgeom'][n][4] or nan) + " eV" + "  " + \
                                        "(%s)" % ncycles + "\n"
                print(output_lines)

    elif args.kill:
        if not args.jobs:
            print('NO JOBS GIVEN')
            return
        yac.cursor.execute(
            'SELECT ip FROM yascheduler_tasks WHERE status=%s AND task_id IN (%s);'
            % (yac.STATUS_RUNNING, ', '.join(
                [str(task['task_id']) for task in tasks])))
        for row in yac.cursor.fetchall():
            ssh_conn = SSH_Connection(host=row[0],
                                      user=config.get('remote', 'user'),
                                      connect_kwargs=ssh_custom_key)
            # Best-effort kill: SSH or pkill failures are deliberately ignored.
            try:
                result = ssh_conn.run('pkill %s' % yac.RUNNING_MARKER,
                                      hide=True)
            except:
                pass

    elif args.info:
        for task in tasks:
            print('task_id={}\tstatus={}\tlabel={}\tip={}'.format(
                task['task_id'], statuses[task['status']], task['label'],
                task['ip'] or '-'))

    else:
        for task in tasks:
            print('{}   {}'.format(task['task_id'], statuses[task['status']]))

    yac.connection.close()

    # Remove the temporary parsing snippet, if one was downloaded.
    if local_calc_snippet and os.path.exists(local_calc_snippet):
        os.unlink(local_calc_snippet)
예제 #26
0
class Xele(object):
    """Fabric-based helper for running commands and transferring files on
    the trade/md hosts stored in the TradeTable / MDTable models."""

    def __init__(self):
        self.ssh_connec = None       # most recently opened fabric Connection
        self.prpcrypt = Prpcrypt()   # decrypts the passwords stored in the DB

    def fabric_xele(self, host):
        """Open an SSH connection described by the *host* dict
        (keys: ip/port/user/password; the password is stored encrypted).

        Returns the Connection on success, or the exception text on failure.
        """
        port = host['port']
        user = host['user']
        password = self.prpcrypt.decrypt(host['password'])
        host = host['ip']
        try:
            self.ssh_connec = Connection(host=host, port=port, user=user, connect_kwargs={'password': password})
            return self.ssh_connec
        except Exception as e:
            return str(e)

    def run_command(self, host, command):
        """Run *command* on *host*; return its stdout, or the error text."""
        try:
            self.fabric_xele(host)
            result = self.ssh_connec.run(command, hide=True).stdout
            return result
        except Exception as e:
            return str(e)

    def get_file(self, host, source, destination):
        """Download *source* from *host* to *destination*; return a status string."""
        try:
            self.fabric_xele(host)
            self.ssh_connec.get(source, destination)
            return '%s [%s] get success' % (host['hostname'], source)
        except Exception:
            return '[ERROR] %s [%s] get failed' % (host['hostname'], source)

    def put_file(self, host, source, destination):
        """Upload *source* to *destination* on *host*; return a status string."""
        try:
            self.fabric_xele(host)
            self.ssh_connec.put(source, destination)
            return '%s [%s] put success' % (host['hostname'], source)
        except Exception as e:
            # return e
            return '[ERROR] %s [%s] put failed' % (host['hostname'], source)

    def run_exec(self, table, command):
        """Run *command* on every host of *table* ('trade' or 'md') in
        parallel threads; return the collected, HTML-spaced output lines.
        """
        # BUG FIX: time.clock() was removed in Python 3.8; perf_counter()
        # is the documented replacement for interval timing.
        s = time.perf_counter()
        head = '================={}================='
        heads = []
        thread = []
        results = []
        if table == 'trade':
            init_table = TradeTable()
        elif table == 'md':
            init_table = MDTable()
        else:
            # BUG FIX: previously fell through to a NameError on init_table;
            # fail fast with a clear message instead.
            raise ValueError("unknown table: %r" % (table,))
        hosts = init_table.query.all()
        for host in hosts:
            tmp = host.getdict()
            t = Mythread(self.run_command, (tmp, command), self.run_command.__name__)
            heads.append(head.format(tmp.get('hostname')))
            thread.append(t)
        for i in thread:
            i.start()
        for i in thread:
            i.join()
        for i in range(len(thread)):
            results.append(heads[i])
            res = thread[i].get_result().split('\n')
            for j in res:
                # Preserve column alignment when the output is shown as HTML.
                rep = j.replace(' ', '&nbsp;')
                results.append(rep)
        print(time.perf_counter() - s)
        return results
예제 #27
0
class CertToolEsxi(object):
    """Replace the SSL certificate (rui.crt / rui.key) on an ESXi host over SSH.

    Certificates are generated with openssl on the ESXi host itself, backed up
    locally under ``output_folder/<esx_server>/<timestamp>/``, and the host is
    rebooted (through maintenance mode) to activate the new certificate.
    """

    def __init__(self, esx_server: str, esx_user: str, esx_pass: str, output_folder: Path, verbose: bool = False):

        # Parameters
        self.esx_crt = '/etc/vmware/ssl/rui.crt'
        self.esx_key = '/etc/vmware/ssl/rui.key'
        self.remote_tmp = Path('/tmp')
        self.service_start_delay = 120  # seconds to wait for ESXi services after reboot

        # Variables
        self.esx_server = esx_server
        self.esx_user = esx_user
        self.esx_pass = esx_pass
        self.verbose = verbose

        # Create output folder
        self.output_folder = output_folder / self.esx_server
        if not self.output_folder.exists():
            self.output_folder.mkdir(parents=True)

        # Connection
        self.connection = Connection(host=esx_server, user=self.esx_user, connect_kwargs={"password": self.esx_pass})

    @contextmanager
    def __maintenance_mode(self):
        """Context manager: ensure the host is in maintenance mode for the
        duration of the block, restoring the previous state afterwards."""
        enabled = False
        cmd_get = 'esxcli system maintenanceMode get'
        cmd_enable = 'esxcli system maintenanceMode set --enable True --timeout 600'
        cmd_disable = 'esxcli system maintenanceMode set --enable False --timeout 600'
        try:
            result = self.connection.run(f'{cmd_get}', pty=True, hide=True)
            enabled = 'Enabled' in result.stdout
            if not enabled:
                result = self.connection.run(f'{cmd_enable}', pty=True, hide=True)
            yield
        finally:
            # Only leave maintenance mode if this context entered it.
            if not enabled:
                result = self.connection.run(f'{cmd_disable}', pty=True, hide=True)

    def __reboot(self):
        # Reboot the host inside maintenance mode and block until it is
        # reachable again and its services have had time to start.
        with self.__maintenance_mode():
            # Reboot host
            cmd = f'esxcli system shutdown reboot --delay=10 --reason="Installing new SSL certs"'
            result = self.connection.run(f"{cmd}", pty=True, hide=True)

            # Wait for reboot to complete
            print(f'Waiting for host {self.esx_server} to shutdown')
            host_down(hostname=self.esx_server)

            # Wait for host to respond
            print(f'Waiting for host {self.esx_server} to power up')
            host_up(hostname=self.esx_server)

            # Wait for services to start up
            print(f'Waiting {self.service_start_delay} seconds for services to start')
            sleep(self.service_start_delay)

    def install_selfsigned(self, ca_crt: x509, ca_key: x509):
        """Generate a host certificate on the ESXi box, sign it with the given
        CA cert/key, install the chain and reboot the host.

        :param ca_crt: CA certificate object (cryptography x509)
        :param ca_key: CA private key used to sign the host CSR
        """

        # Create folder — one timestamped backup dir per run.
        date = datetime.now().strftime('%Y%m%d%H%M%S')
        cert_dir = self.output_folder / date
        if not cert_dir.exists():
            cert_dir.mkdir(parents=True)

        # File names
        cert_cfg_file = 'rui.cfg'
        cert_key_file = 'rui.key'
        cert_csr_file = 'rui.csr'
        cert_crt_file = 'rui.crt'
        cert_chain_file = 'rui-chain.crt'
        cert_crt_bak_file = 'rui.crt.bak'
        cert_key_bak_file = 'rui.key.bak'
        ca_crt_file = '../ca.crt'
        ca_key_file = '../ca.key'

        # Create local paths
        local_cfg = cert_dir / cert_cfg_file
        local_csr = cert_dir / cert_csr_file
        local_key = cert_dir / cert_key_file
        local_crt = cert_dir / cert_crt_file
        local_chain = cert_dir / cert_chain_file
        local_crt_bak = cert_dir / cert_crt_bak_file
        local_key_bak = cert_dir / cert_key_bak_file
        local_ca_crt = cert_dir / ca_crt_file
        local_ca_key = cert_dir / ca_key_file

        # Remote temporary files
        # NOTE(review): joining '../ca.crt' under /tmp yields '/tmp/../ca.crt'
        # (i.e. outside /tmp) on the remote host — confirm this is intended.
        remote_cfg = str(self.remote_tmp / cert_cfg_file)
        remote_csr = str(self.remote_tmp / cert_csr_file)
        remote_crt = str(self.remote_tmp / cert_crt_file)
        remote_chain = str(self.remote_tmp / cert_chain_file)
        remote_key = str(self.remote_tmp / cert_key_file)
        remote_ca_crt = str(self.remote_tmp / ca_crt_file)
        remote_ca_key = str(self.remote_tmp / ca_key_file)

        # Write CA crt
        ca_crt_bytes = ca_crt.public_bytes(encoding=serialization.Encoding.PEM)
        local_ca_crt.write_bytes(ca_crt_bytes)
        self.connection.put(local=local_ca_crt, remote=remote_ca_crt)

        # Write CA key (unencrypted PKCS8 PEM, needed by openssl on the host)
        ca_key_bytes = ca_key.private_bytes(encoding=serialization.Encoding.PEM,
                                            format=serialization.PrivateFormat.PKCS8,
                                            encryption_algorithm=serialization.NoEncryption())
        local_ca_key.write_bytes(ca_key_bytes)
        self.connection.put(local=local_ca_key, remote=remote_ca_key)

        # Create host cfg
        config = create_cert_config(host=self.esx_server)
        local_cfg.write_text(config)
        self.connection.put(local=local_cfg, remote=remote_cfg)

        # Create certificate request (new key + CSR generated on the host)
        cmd = f'openssl req ' \
              f'-new -nodes ' \
              f'-out {str(remote_csr)} ' \
              f'-keyout {str(remote_key)} ' \
              f'-config {str(remote_cfg)}'
        result = self.connection.run(f"{cmd}", pty=True, hide=True)
        self.connection.get(local=local_csr, remote=remote_csr)
        self.connection.get(local=local_key, remote=remote_key)

        # Sign CSR with CA key and crt
        cmd = f'openssl x509 ' \
              f'-req ' \
              f'-days 360 ' \
              f'-in {str(remote_csr)} ' \
              f'-CA {str(remote_ca_crt)} ' \
              f'-CAkey {str(remote_ca_key)} ' \
              f'-CAcreateserial ' \
              f'-out {str(remote_crt)} ' \
              f'-extfile {str(remote_cfg)} ' \
              f'-extensions v3_req'
        result = self.connection.run(f'{cmd}', pty=True, hide=True)
        self.connection.get(local=local_crt, remote=remote_crt)

        # Create certificate chain (host cert followed by CA cert)
        cmd = f'cat {str(remote_crt)} {str(remote_ca_crt)} > {str(remote_chain)}'
        result = self.connection.run(f'{cmd}', pty=True, hide=True)
        self.connection.get(local=local_chain, remote=remote_chain)

        # Backup current crt and key
        self.connection.get(local=local_crt_bak, remote=self.esx_crt)
        self.connection.get(local=local_key_bak, remote=self.esx_key)

        # Deploy new crt and key
        self.connection.put(local=local_chain, remote=self.esx_crt)
        self.connection.put(local=local_key, remote=self.esx_key)

        # Reboot host
        self.__reboot()

    def install_msca_signed(self, ca_server: str, ca_user: str, ca_pass: str):
        """Same flow as install_selfsigned, but the CSR is signed by a
        Microsoft Certificate Authority (via certsrv) instead of a local CA key.

        :param ca_server: hostname of the Microsoft CA web enrollment server
        :param ca_user: CA account username
        :param ca_pass: CA account password
        """

        # Create folder — one timestamped backup dir per run.
        date = datetime.now().strftime('%Y%m%d%H%M%S')
        cert_dir = self.output_folder / date
        if not cert_dir.exists():
            cert_dir.mkdir(parents=True)

        # File names
        cert_cfg_file = 'rui.cfg'
        cert_key_file = 'rui.key'
        cert_csr_file = 'rui.csr'
        cert_crt_file = 'rui.crt'
        cert_chain_file = 'rui-chain.crt'
        cert_crt_bak_file = 'rui.crt.bak'
        cert_key_bak_file = 'rui.key.bak'
        ca_crt_file = '../ca.crt'

        # Create local paths
        local_cfg = cert_dir / cert_cfg_file
        local_csr = cert_dir / cert_csr_file
        local_key = cert_dir / cert_key_file
        local_crt = cert_dir / cert_crt_file
        local_chain = cert_dir / cert_chain_file
        local_crt_bak = cert_dir / cert_crt_bak_file
        local_key_bak = cert_dir / cert_key_bak_file
        local_ca_crt = cert_dir / ca_crt_file

        # Remote temporary files
        # NOTE(review): same '/tmp/../ca.crt' caveat as install_selfsigned.
        remote_cfg = str(self.remote_tmp / cert_cfg_file)
        remote_csr = str(self.remote_tmp / cert_csr_file)
        remote_crt = str(self.remote_tmp / cert_crt_file)
        remote_chain = str(self.remote_tmp / cert_chain_file)
        remote_key = str(self.remote_tmp / cert_key_file)
        remote_ca_crt = str(self.remote_tmp / ca_crt_file)

        # Get the Microsoft Certificate Authority file
        ca_crt = get_msca_root_cert(hostname=ca_server, username=ca_user, password=ca_pass)
        local_ca_crt.write_bytes(ca_crt.public_bytes(encoding=serialization.Encoding.PEM))

        # Connect to Microsoft Certificate Authority
        cert_srv = Certsrv(server=ca_server, username=ca_user, password=ca_pass, cafile=str(local_ca_crt))
        cert_srv.check_credentials()
        self.connection.put(local=local_ca_crt, remote=remote_ca_crt)

        # Create host cfg
        config = create_cert_config(host=self.esx_server)
        local_cfg.write_text(config)
        self.connection.put(local=local_cfg, remote=remote_cfg)

        # Create certificate request (new key + CSR generated on the host)
        cmd = f'openssl req -new -nodes -out {remote_csr} -keyout {remote_key} -config {remote_cfg}'
        result = self.connection.run(f"{cmd}", pty=True, hide=True)
        self.connection.get(local=local_csr, remote=remote_csr)
        self.connection.get(local=local_key, remote=remote_key)

        # Get signed certificate from the MS CA using the WebServer template
        csr_bytes = local_csr.read_bytes()
        crt_bytes = cert_srv.get_cert(csr_bytes, 'WebServer')
        local_crt.write_bytes(crt_bytes)
        self.connection.put(local=local_crt, remote=remote_crt)

        # Create certificate chain (host cert followed by CA cert)
        cmd = f'cat {remote_crt} {remote_ca_crt} > {remote_chain}'
        result = self.connection.run(f'{cmd}', pty=True, hide=True)
        self.connection.get(local=local_chain, remote=remote_chain)

        # Backup current crt and key
        self.connection.get(local=local_crt_bak, remote=self.esx_crt)
        self.connection.get(local=local_key_bak, remote=self.esx_key)

        # Deploy new crt and key
        self.connection.put(local=local_chain, remote=self.esx_crt)
        self.connection.put(local=local_key, remote=self.esx_key)

        # Reboot host
        self.__reboot()
예제 #28
0
class coverage:
    """Collect kernel gcov coverage data from a remote board (over SSH via
    fabric) and turn it into an lcov HTML report."""

    def __init__(self, address="192.168.86.33", username="******", password="******"):
        # Remote board address; commands run through a fabric Connection.
        self.address = address
        self.conn = Connection(
            "{username}@{ip}".format(username=username, ip=address,),
            connect_kwargs={"password": password},
        )
        # Local directory the gcov tarball is unpacked into
        # (set by collect_gcov_trackers).
        self.unpacked = None

    def _crun(self, cmd):
        # Run *cmd* on the remote board, echoing the command and its output.
        print(cmd)
        result = self.conn.run(cmd)
        print(result)
        print(result.stdout)

    def _lrun(self, cmd):
        # Run *cmd* on the local machine, echoing the command and its output.
        print(cmd)
        result = self.conn.local(cmd)
        print(result)
        print(result.stdout)

    def collect_gcov_trackers(self):
        """Collect gcov traces from the remote board.

        Mirrors /sys/kernel/debug/gcov into a random /tmp directory on the
        board (gcda files must be cat'ed, gcno symlinks copied), tars it up,
        downloads the archive, and unpacks it into ./out locally.
        """
        tmp_folder = "".join(random.choice(string.ascii_lowercase) for i in range(16))
        tmp_folder = "/tmp/" + tmp_folder
        GCDA = "/sys/kernel/debug/gcov"
        # Recreate the debugfs directory layout under tmp_folder.
        cmd = "find " + GCDA + " -type d -exec mkdir -p " + tmp_folder + "/\{\} \;"
        self._crun(cmd)
        # gcda pseudo-files cannot be cp'ed from debugfs; stream them with cat.
        cmd = (
            "find "
            + GCDA
            + " -name '*.gcda' -exec sh -c 'cat < $0 > '"
            + tmp_folder
            + "'/$0' {} \;"
        )
        self._crun(cmd)
        # gcno entries are symlinks; copy them preserving the link (-d).
        cmd = (
            "find "
            + GCDA
            + " -name '*.gcno' -exec sh -c 'cp -d $0 '"
            + tmp_folder
            + "'/$0' {} \;"
        )
        self._crun(cmd)
        # Random archive name avoids collisions in the board's home dir.
        dest = (
            "".join(random.choice(string.ascii_lowercase) for i in range(16))
            + ".tar.gz"
        )
        cmd = "tar czf " + dest + " -C " + tmp_folder + " sys"
        self._crun(cmd)
        self.conn.get(dest)
        # Unpack
        self.unpacked = os.getcwd() + "/out"
        self._lrun("mkdir " + self.unpacked)
        self._lrun("tar xvf " + dest + " -C " + self.unpacked + "/")
        self._lrun("rm " + dest)

    def gen_lcov_html_report(self, linux_build_dir):
        """Generate an lcov HTML report from the kernel build dir and the
        gcov traces collected by collect_gcov_trackers."""
        report = os.getcwd() + "/report"
        cmd = "lcov -b " + linux_build_dir + " -c -d " + self.unpacked + " > " + report
        self._lrun(cmd)
        # NOTE(review): this chained assignment also overwrites self.unpacked
        # with the html path — looks unintended; probably should be just
        # `html = os.getcwd() + "/html/"`. Confirm before changing.
        html = self.unpacked = os.getcwd() + "/html/"
        cmd = "genhtml -o " + html + " " + report
        self._lrun(cmd)
        print("Generated HTML is located here", html)
예제 #29
0
def get_all_logs(key, host_addr, group):
    """Collect log archives from every host listed in *host_addr*.

    For each row, connects over SSH with key authentication, tars up
    /var/log/script, the root command history and /git on the remote side,
    and downloads them into /root/log/<group>/<id>/<timestamp>/ locally.
    Hosts that time out are skipped.

    :param key: path to the SSH private key used for authentication
    :param host_addr: DataFrame with 'student_id' and 'ip_address' columns
    :param group: group number as a string; offsets the student id and
                  names the local backup directory
    """
    str_date = datetime.now().strftime('%s')  # epoch seconds as a string
    for _, row in host_addr.iterrows():
        # Offset the per-group student id so ids are globally unique
        # (idiom fix: int()/str() instead of C-style casts, and no
        # shadowing of the builtin `id`).
        student_id = str(row['student_id'] + int(group) * len(host_addr))
        host = row['ip_address']

        try:
            print("--------------- Start get_all_logs function " +
                  student_id.zfill(3) + " ---------------")
            c = Connection(host=host,
                           user="******",
                           port=22,
                           connect_timeout=5,
                           connect_kwargs={"key_filename": key})
            print("Connected host: " + host)

            logger_dir = "/home/logger/log"
            backup_dir = ("/root/log/" + group + "/" + student_id.zfill(3) +
                          "/" + str_date)
            c.local("mkdir -p " + backup_dir, warn=True)
            print("Created backup_dir locally: " + backup_dir)

            c.run("sudo mkdir -p " + logger_dir, warn=True)
            print("Create backup_dir on remote: " + logger_dir)

            # Archive /var/log/script, download it, then clean up remotely.
            c.run("sudo tar czf " + logger_dir +
                  "/script.tar.gz -C /var/log/ script",
                  warn=True)
            print("Created script.tar.gz on remote")

            c.get("/home/logger/log/script.tar.gz",
                  backup_dir + "/script.tar.gz")
            print("Get script.tar.gz on remote")

            c.run("sudo rm -rf " + logger_dir + "/script.tar.gz", warn=True)
            print("Deleted script.tar.gz on remote")

            # Copy the root command history somewhere readable, then fetch it.
            c.run("sudo cp /root/.command_history " + logger_dir, warn=True)
            print(
                "Copy /root/.command_history to /home/logger/log/.command_history on remote"
            )

            c.get(logger_dir + "/.command_history",
                  backup_dir + "/.command_history")
            print("Get .command_history on remote")

            # Archive /git, download it, then clean up remotely.
            c.run("sudo tar czf " + logger_dir + "/git.tar.gz" + " -C / git",
                  warn=True)
            print("Created git.tar.gz on remote")

            c.get(logger_dir + "/git.tar.gz", backup_dir + "/git.tar.gz")
            print("Get git.tar.gz on remote")

            c.run("sudo rm -rf " + logger_dir + "/git.tar.gz", warn=True)
            print("Deleted git.tar.gz on remote")

            print(
                "--------------- Finish get_all_logs function ---------------")
            print()
        except socket.timeout:
            # An unreachable host must not abort collection for the rest.
            continue
예제 #30
0
def run_benchmark_set(benchmark_set, use_spot=True):
    """Run one benchmark set on a fresh EC2 runner instance and upload results to S3.

    Finds the 'hasura-benchmarks-runner' AMI, launches a c4.8xlarge (spot-market
    first when ``use_spot``, falling back to on-demand and cycling through
    regions on capacity errors), SSHes in, loads the docker image under test,
    runs ``./bench.sh`` remotely, downloads ``report.json``, uploads it to S3,
    and terminates the instance.

    Args:
        benchmark_set: name of the benchmark-set directory to run remotely.
        use_spot: if True, try spot instances before falling back to on-demand.

    Returns:
        The invoke Result of the remote bench.sh run, or None when interrupted
        while shutting down.

    Raises:
        invoke.exceptions.UnexpectedExit: if the remote run fails while already
            on on-demand capacity (spot evictions are retried automatically).
    """
    # Prefer this instead of 'print()' so we can follow along with concurrent execution:
    def say(s):
        print(f"*** \033[92m {benchmark_set}: {s} \033[0m")

    def warn(s):
        print(f"*** \033[93m {benchmark_set}: {s} \033[0m")

    boto3_session = new_boto_session()
    # boto3_session = boto3.Session(profile_name='benchmarks')
    ec2_client = boto3_session.client('ec2', region_name='us-east-2')
    ec2 = boto3_session.resource('ec2', region_name='us-east-2')
    s3 = boto3_session.client('s3')

    # Get benchmark-runner AMI (see README_AMI.md)
    runner_images_dirty = ec2_client.describe_images(Filters=[{'Name': 'tag:Name', 'Values': ['hasura-benchmarks-runner']}])['Images']
    if len(runner_images_dirty) > 1:
        sys.exit("More than one instance tagged 'hasura-benchmarks-runner'; please delete tag from old image")
    elif len(runner_images_dirty) == 0:
        sys.exit("The 'hasura-benchmarks-runner' image needs to be copied to this region.")

    runner_image_id = runner_images_dirty[0]['ImageId']

    # We can and do run into capacity issues. Try our best to find a region
    # with spot availability (much cheaper), else try on-demand price
    spot = {
        'MarketType': 'spot',
        'SpotOptions': {
            # A bit over the on-demand price
            # NOTE: at time of this writing spot price has been very stable around $0.35/hr
            'MaxPrice': '1.80',
            'SpotInstanceType': 'one-time',
            'InstanceInterruptionBehavior': 'terminate'
        }
    }
    # Regions in which we can run benchmarks, in order (approximately) from
    # cheap to more expensive. With c4.8xlarge we run into capacity issues from
    # time to time.
    # NOTE: if you want to add a new region here you'll need to copy the
    # hasura-benchmarks-runner AMI, security group, and keypair to that region
    # also.
    ok_regions = [
        "us-east-2",
        "us-west-2",
        "ap-south-1",
        "ca-central-1",
        "eu-west-2",
        "eu-west-1",
    ]
    # the sequence of spot/on-demand requests we'll make; the "FAIL" sentinel
    # marks exhaustion of every attempt:
    market_types = [spot, {}, {}, {}, "FAIL"] if use_spot else [{}, {}, {}, "FAIL"]

    def launch_instance():
        # We'll try on-demand instances three times, hoping capacity gets added, before giving up
        for market_type in market_types:
            for region in ok_regions:
                if market_type == "FAIL":
                    sys.exit("All regions are out of capacity! We'll just need to wait and try again, sorry.")

                market_type_str = "on-demand" if market_type == {} else "spot"
                say(f"Trying to launch in {region} as {market_type_str}")

                # NOTE(review): `region` only appears in the log line above —
                # the `ec2` resource (and the AMI lookup) are pinned to
                # us-east-2, so every attempt actually launches there. A real
                # multi-region fallback needs a per-region resource AND a
                # per-region AMI lookup (AMI IDs are region-specific); confirm
                # intent before changing.
                try:
                    # Launch beefy ec2 instances that will run the actual benchmarks:
                    instance = ec2.create_instances(
                        ImageId=runner_image_id,
                        MinCount=1, MaxCount=1,
                        # NOTE: benchmarks are tuned very specifically to this instance type  and
                        # the other settings here (see bench.hs):
                        InstanceType='c4.8xlarge',
                        KeyName='hasura-benchmarks-runner',
                        InstanceInitiatedShutdownBehavior='terminate',
                        # Disable hyperthreading:
                        CpuOptions={
                            'CoreCount': 18,
                            'ThreadsPerCore': 1
                        },
                        # AFAICT this is always true for c4 instances and comes at no additional
                        # charge, but the console shows 'false' if we don't set this...
                        EbsOptimized=True,
                        InstanceMarketOptions=market_type,
                        TagSpecifications=[{
                            'ResourceType': 'instance',
                            'Tags': [
                                # Informational. This will show up in console:
                                {'Key': 'Name',
                                 'Value': 'hasura-benchmarks-runner-'+benchmark_set
                                },
                                # "Owner" here is an arbitrary name; this tag allows us to define an
                                # IAM policy that effectively restricts hasura-benchmarks-runner to
                                # only terminating instances that it has started (here):
                                {'Key': 'Owner',
                                 'Value': 'hasura-benchmarks-runner'
                                }
                            ]
                        }],
                        SecurityGroupIds=['hasura-benchmarks-runner']
                    )[0]

                except botocore.exceptions.ClientError as error:
                    if error.response['Error']['Code'] == 'InsufficientInstanceCapacity':
                        say(f"Warning, got InsufficientInstanceCapacity in region {region}. Trying the next one")
                        if region == ok_regions[-1]:
                            say('Waiting a bit, hoping capacity gets added before going through regions again')
                            time.sleep(20)
                        continue
                    else:
                        raise

                # Above succeeded, presumably, so we can return...

                # Ensure we clean up instances even on error:
                LAUNCHED_INSTANCES.put(instance)

                instance.wait_until_running()
                instance.load()
                # NOTE: at this point we may still not be able to SSH in
                return instance

    try:
        # For ergonomics / CI compatibility the SSH key arrives in an
        # environment variable; paramiko wants a file path, so stage it in a
        # NamedTemporaryFile for the duration of the run.
        with tempfile.NamedTemporaryFile(mode='w+') as key_file:
            key_file.write(BENCHMARKS_RUNNER_PRIVATE_KEY)
            key_file.seek(0)

            instance = launch_instance()
            # NOTE(review): the username below looks redacted ("******") in
            # this copy of the file — restore the real login for the runner AMI.
            c = Connection(
                instance.public_dns_name,
                user="******",
                connect_timeout=10,
                connect_kwargs={
                    "key_filename": key_file.name,
                    ## NOTE: I couldn't figure out how to take the key from a string:
                    ##    https://github.com/paramiko/paramiko/issues/1866
                    # "pkey": paramiko.rsakey.RSAKey.from_private_key(io.StringIO(BENCHMARKS_AWS_PRIVATE_KEY)),
                }
            )
            # It can take some time for our EC2 instances to become available, so
            # we need to retry SSH connections for a while:
            say("Waiting for SSH to come up")
            conn_attempts = range(0, 20)
            for n in conn_attempts:
                try:
                    c.run("whoami", hide='out')
                # BUGFIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit during the wait loop.
                except Exception:
                    if n == conn_attempts[-1]:
                        raise
                    else:
                        time.sleep(1)
                        continue
                break

            # In case our heroic exception handling and cleanup attempts here fail,
            # make sure this instance shuts down (and is terminated, per
            # InstanceInitiatedShutdownBehavior) after X minutes:
            c.sudo('shutdown -P +20 "Oops, we failed to clean up this instance; terminating now"')

            say("Uploading and loading docker image under test")
            patchwork.transfers.rsync(c, HASURA_DOCKER_IMAGE, '/tmp/hasura_image.tar', rsync_opts="--quiet")
            hasura_docker_image_name = c.run(
                "docker load -i /tmp/hasura_image.tar | grep '^Loaded image: ' | sed 's/Loaded image: //g'",
                pty=True
            ).stdout.strip()

            say(f"Running benchmarks for: {hasura_docker_image_name}")
            # Upload the benchmarks directory to remote (though we only care about 'benchmark_set')
            patchwork.transfers.rsync(c, abs_path('../benchmarks'), '/tmp', exclude='venv', rsync_opts="--quiet")
            with c.cd("/tmp/benchmarks"):
                # We'll sleep for the 'huge_schema' case to allow memory to settle,
                # since measuring idle residency to support the schema is the main
                # point of this test. Since 'chinook' takes much longer we don't
                # lose any wallclock CI time by waiting here
                # TODO the fact that we're mentioning a specific benchmark set here is a wart:
                post_setup_sleep = 90 if benchmark_set == 'huge_schema' else 0
                # NOTE: it seems like K6 is what requires pty here:
                # NOTE: add hide='both' here if we decide to suppress output
                bench_result = c.run(f"./bench.sh {benchmark_set} {hasura_docker_image_name} {post_setup_sleep}", pty=True)

            with tempfile.TemporaryDirectory("-hasura-benchmarks") as tmp:
                filename = f"{benchmark_set}.json"
                say(f"Fetching results and uploading to S3. Available at: {s3_url(filename)}")

                local_path = os.path.join(tmp, filename)
                c.get(f"/tmp/benchmarks/benchmark_sets/{benchmark_set}/report.json", local=local_path)

                # BUGFIX: the object key previously ended in a literal
                # placeholder instead of the report filename, so every set
                # overwrote the same S3 object and the s3_url(filename)
                # announced above was a dead link.
                s3.upload_file(
                    local_path, RESULTS_S3_BUCKET, f"{THIS_S3_BUCKET_PREFIX}/{filename}",
                    ExtraArgs={'ACL': 'public-read'}
                )

            # Terminate ASAP, to save money, even though we also ensure cleanup in main():
            say("Success! Shutting down")
            instance.terminate()

            return bench_result

    # If AWS evicted our spot instance (probably), try again with on-demand
    except invoke.exceptions.UnexpectedExit:
        if SHUTTING_DOWN:
            warn("interrupted, exiting")
            return None
        if use_spot:
            warn("Dang, it looks like our spot instance was evicted! Retrying with on-demand")
            # BUGFIX: propagate the retry's result; it was previously discarded,
            # so callers received None whenever the spot instance was evicted.
            return run_benchmark_set(benchmark_set, use_spot=False)
        else:
            raise
예제 #31
0
def run(ipinfo):
    """Collect top-SQL / slow-query data from one database host over SSH.

    Args:
        ipinfo: 9-field sequence
            (dbtype, host, user, encpwd, app_id, app_name, env, dbuser, encdbpwd)
            where dbtype selects the strategy: "2" = DB2, "3" = Oracle on AIX,
            "4" = MySQL; any other value is logged and skipped.

    Side effects: uploads a collection script (DB2/Oracle) or fetches the slow
    log (MySQL), feeds the output to the matching analyser, and bumps the
    ``counter_bar`` progress counters. All failures are logged, never raised.
    """
    try:
        # BUGFIX: the last field used to be unpacked into `encpwd` again,
        # clobbering the SSH password and leaving `encdbpwd` (needed by the
        # MySQL branch below) undefined.
        dbtype, host, user, encpwd, app_id, app_name, env, dbuser, encdbpwd = ipinfo[:]
        log.info("get {}".format(host))
        pwd = decodestring(encpwd)
    except Exception as e:
        log.error('{}{}{}'.format(e, ipinfo, "get ipinfo error"))
        return

    try:
        c = Connection(host=host,
                       user=user,
                       connect_kwargs={'password': pwd},
                       connect_timeout=60)

        if dbtype == "2":
            counter_bar['start'] += 1
            log.info("dbtype is db2")
            filepath = os.path.join(local_template, db2TopSqlsh)
            c.put(filepath, route_tmp)
            # BUGFIX: was `db2TopSqlSh` (wrong capitalisation -> NameError);
            # use the same name the upload above uses.
            cmd = db2.execDb2TopSh.format(db2TopSqlsh)
            result = c.run(cmd, warn=True, hide=True)
            if result.stderr != "":
                log.error('{}{}'.format(ipinfo, result.stderr))
            elif result.stdout != "":
                log.info("analysis db2 start")
                db2.getTopData(result.stdout, host, app_id, app_name, env)
                log.info("analysis db2 done")
            counter_bar['done'] += 1

        elif dbtype == "3":
            counter_bar['start'] += 1
            log.info("dbtype is aix")
            filepath = os.path.join(local_template, oracleShFile)
            c.put(filepath, route_tmp)
            cmd = ora.execOrcTopSh.format(oracleShFile, dbuser)
            result = c.run(cmd, warn=True, hide=True)
            if result.stderr != "":
                log.error('{}{}'.format(ipinfo, result.stderr))
            elif result.stdout != "":
                log.info("analysis oracle start")
                # NOTE(review): this feeds Oracle output to db2.getTopData --
                # confirm whether ora.getTopData was intended here.
                db2.getTopData(result.stdout, host, app_id, app_name, env)
                log.info("analysis oracle done")
            counter_bar['done'] += 1

        elif dbtype == "4":
            counter_bar['start'] += 1
            log.info("dbtype is mysql")
            logpath = mysql.getLog(host, dbuser, encdbpwd)
            localfile_name = os.path.join(
                local_tmp, ''.join(host.replace('.', '_') + "_mysql.log"))
            log.info("get log file")
            c.get(logpath, localfile_name)
            try:
                log.info("analysis mysql start")
                mysql.cmdSlow(localfile_name, host, app_id, app_name, env)
                log.info("analysis mysql done")
            except Exception as e:
                # BUGFIX: was `log.info("...").format(e)` -- calling .format()
                # on the None returned by log.info raised AttributeError.
                log.info("analysis mysql error {}".format(e))
            counter_bar['done'] += 1

        else:
            # BUGFIX: the arguments were wrapped in an extra tuple, so the
            # second '{}' placeholder raised IndexError at runtime.
            log.critical('{} {}'.format(ipinfo, "not pattern any dbtype"))
            return
        c.close()

    except Exception as e:
        log.critical('{}{}{}'.format(e, ipinfo, "connect error"))
        return
def starting_module(c_q):
    """Interactive client: wake a remote server via Wake-on-LAN, then offer to
    download ALL_VANILLA.zip from it over SSH.

    Credentials and host details are unpacked from the obfuscated
    ``configs.pyc`` payload in the working directory. Status is reported to
    the caller through the queue ``c_q``:
        1 = finished / user declined, 2 = configs.pyc missing,
        3 = DNS lookup failed,        4 = magic packet could not be sent,
        5 = server would not turn on, 6 = file transfer failed.
    """

    print("###########################################")
    print("##          TRANSFER ALL - V3.0          ##")
    print("##         VANILLA SERVER - 1.14.4       ##")
    print("##           AUTHOR - MAFIOSI            ##")
    print("###########################################")
    print()
    print("[WARNING] DO NOT CLOSE THE PROGRAM WHILE IT'S RUNNING")
    time.sleep(2)
    print()
    print("[STATE] Checking file configs.pyc availability....")
    try:
        # The config payload must sit next to the executable.
        s = open('configs.pyc', 'rb')
        print("[RESULT] File configs.pyc found")
        print()
    except:
        print(
            "[RESULT] Move file configs.pyc to the same folder as this EXECUTABLE"
        )
        c_q.put(2)
        return

    # Skip the .pyc header and unmarshal the embedded code object.
    # NOTE(review): a 12-byte header matches Python <= 3.6 pyc layout
    # (3.7+ uses 16 bytes) -- confirm which interpreter built configs.pyc.
    s.seek(12)
    olives = marshal.load(s)

    # Execute the code object inside a throwaway module to obtain its pick().
    garden = types.ModuleType("Garden")
    exec(olives, garden.__dict__)

    # Values are stored base64-encoded: 1 -> SSH username, 2 -> password,
    # 3 -> hostname, 4 -> (presumably) the MAC address fed to the
    # Wake-on-LAN magic packet below.
    alpha = base64.decodebytes(bytes(garden.pick(1)))
    beta = base64.decodebytes(bytes(garden.pick(2)))
    gamma = base64.decodebytes(bytes(garden.pick(3)))
    delta = base64.decodebytes(bytes(garden.pick(4)))
    # UDP port used for the Wake-on-LAN packet.
    x = 9

    alpha = alpha.decode()
    beta = beta.decode()
    gamma = gamma.decode()
    delta = delta.decode()

    # CONNECTION VARIABLES
    server = Connection(host=gamma,
                        user=alpha,
                        port=22,
                        connect_kwargs={"password": beta})
    # NOTE(review): `command` is assigned but never used in this function.
    command = 'nohup screen -S mine -d -m python3 Internal_MManager.py &'

    # TIME PC TAKES TO TURN ON
    zzz = 50
    # Set to True once an SSH command succeeds (server reachable).
    verify = False

    ##########################################
    ##########     MAIN PROGRAM     ##########
    ##########################################

    while True:
        print('[STATE] Looking up server info...')
        try:
            time.sleep(1)
            # Resolve the server hostname; `i` holds the IP address here
            # (it is later reused as an input-retry counter).
            i = socket.gethostbyname(gamma)
            time.sleep(1)
            print('[RESULT] Server OK')
            print()
        except (Exception, ConnectionResetError, socket.timeout,
                paramiko.ssh_exception.SSHException) as err:
            print(
                "[RESULT] Server info could not be retrieved, try again later")
            c_q.put(3)
            return

        # TELLS PC TO TURN ON
        print('[STATE] Checking if Server is ON...')
        try:
            send_magic_packet(delta, ip_address=i, port=x)
        except (Exception, ConnectionResetError, socket.timeout,
                paramiko.ssh_exception.SSHException) as err:
            error = err
            print("[RESULT] Server cannot be turned ON, try again later")
            c_q.put(4)
            return

        # CHECKS IF PC IS ALREADY ON AND CONNECTS
        try:
            # Cheap probe command; success means SSH is already reachable.
            server.run('ls', hide=True)
            verify = server.is_connected
        except (Exception, ConnectionResetError, socket.timeout,
                paramiko.ssh_exception.SSHException) as err:
            print("[RESULT] Server is turned off --> Turning it ON...")

        if not verify:

            print("[ACTION] Sending Magic Packets")
            print("[ACTION] Waiting for Server to turn ON. ETA: ~60 sec")
            print(
                "[WARNING] Program should Work even with Traceback error - Cause (missing useless repositories)"
            )
            # Give the machine time to boot before the second SSH probe.
            time.sleep(zzz)

            try:
                server.run('ls', hide=True)
                verify = server.is_connected
                if verify:
                    print("[RESULT] Server is turned ON")
                    print()
                else:
                    print(
                        "[RESULT] Server cannot be turned ON, try again later")
                    c_q.put(5)
                    return

            except (Exception, ConnectionResetError, socket.timeout,
                    paramiko.ssh_exception.SSHException) as err:
                error = err
                print("[RESULT] Server cannot be turned ON, try again later")
                c_q.put(5)
                return

        else:
            print("[RESULT] Server is Turned ON")
            print()

        # TRY TO TRANSFER FILES TO PC
        print("[STATE] Initializing File Transfer")
        print(
            "[SPECIFICATIONS] Folder: ALL_VANILLA.zip   Size: 327 MB   ETA: 2-5 min"
        )
        print("[CONTENTS]   1 - JAVA")
        print("             2 - MINECRAFT 1.14.4")
        print("             3 - EXECUTABLES")
        print("             INSTRUCTIONS_Vanilla.txt")
        print()

        answer = None
        # `i` is reused here as the count of invalid answers (3 strikes).
        i = 0
        while answer not in ("y", "n"):
            answer = input(" DO YOU WANT TO PROCEED?  y/n \n ANSWER: ")
            if answer == "y" or answer == "yes":
                try:
                    print()
                    print(
                        "[STATE] Transferring Files to this Executable's Folder"
                    )
                    print(
                        "[WARNING] DO NOT CLOSE THE WINDOW! It will close automatically when done"
                    )
                    # Download the zip to the default local path
                    # (local=None, preserve_mode=True).
                    server.get(
                        '/opt/Transfer/Vanilla/Distribution/ALL_VANILLA.zip',
                        None, True)
                    print("[RESULT] Files Were Transferred Successfully!")
                    print()
                    c_q.put(1)
                    break
                except:
                    print(
                        "[RESULT] Couldn't Transfer Files TO PC, Check Internet Connection or try again later"
                    )
                    c_q.put(6)
                    break
            elif answer == "n" or answer == "no":
                print("[RESULT] Exiting Program")
                c_q.put(1)
                break
            else:
                i = i + 1
                if i == 3:
                    print()
                    print("[RESULT] Alright ya douche I'm closing the program")
                    c_q.put(1)
                    break
                print(
                    "\n[RESULT] That answer is not y(es) or n(o), care to change..."
                )
                answer = None

        return
예제 #33
0
 def backup(self, host, dst):
     """Archive the remote db and images directories and download the tarball.

     Connects to *host*, packs ``db`` and ``images`` inside ``self.directory``
     into ``backup.tgz`` there, copies that archive to the local path *dst*,
     then removes the remote copy.
     """
     remote_archive = f"{self.directory}/backup.tgz"
     conn = Connection(host)
     # Build the archive remotely, pull it down, then clean up after ourselves.
     conn.run(f"cd {self.directory} && tar -cvzf backup.tgz db images")
     conn.get(remote_archive, dst)
     conn.run(f"rm {remote_archive}")