예제 #1
0
def add_new_node_ssh():
    """Register new nodes in /etc/hosts and exchange SSH public keys.

    For each new node: append an ``ip    nodeN`` entry to the local
    /etc/hosts, fetch the node's public key, and merge it into the local
    authorized_keys. Then push the merged /etc/hosts and authorized_keys
    back out to every node in the module-level ``nodes`` dict
    ({ip: password} — assumed defined elsewhere in this file).
    """
    nodes_new = {'172.17.0.12': '111111'}
    node_num = 3
    with open('/etc/hosts', 'a') as f:
        # BUG FIX: the original iterated over the undefined name
        # ``node_new`` (NameError); iterate over ``nodes_new``.
        for ip, pwd in nodes_new.items():
            # BUG FIX: append a trailing newline so consecutive host
            # entries do not run together on a single line.
            f.write('%s    node%s\n' % (ip, node_num))
            node_num += 1
            c = Connection(ip,
                           port=22,
                           user='******',
                           connect_kwargs={'password': pwd})
            # Pull the node's public key, merge it into the local
            # authorized_keys, then remove the temporary copy.
            c.get('/root/.ssh/id_rsa.pub', '/root/.ssh/id_rsa.pub.bak')
            c.local(
                'cat /root/.ssh/id_rsa.pub.bak >> /root/.ssh/authorized_keys')
            c.local('rm -f /root/.ssh/id_rsa.pub.bak')
    nodes.update(nodes_new)
    # Distribute the merged /etc/hosts and authorized_keys to every node.
    for ip, pwd in nodes.items():
        c = Connection(ip,
                       port=22,
                       user='******',
                       connect_kwargs={'password': pwd})
        c.run('rm -f /etc/hosts')
        c.put('/etc/hosts', '/etc/hosts')
        a = c.local('find /root/.ssh/ -name authorized_keys')
        if a.stdout.find('authorized_keys') != -1:
            c.run('rm -f /root/.ssh/authorized_keys')
        c.put('/root/.ssh/authorized_keys', '/root/.ssh/authorized_keys')
    print('over')
예제 #2
0
def put_dir(con: Connection, from_dir: str, to_dir: str):
    """Upload a local directory to the server.

    The directory is tarred locally, uploaded, extracted remotely into
    *to_dir*, and the local archive is removed afterwards.
    """
    archive = "{}.tar.gz".format(from_dir)
    con.local("tar cfz {} {}".format(archive, from_dir))
    con.put(archive, to_dir)
    con.run("tar zxf {}/{} -C {}".format(to_dir, archive, to_dir))
    con.local("rm {}".format(archive))
예제 #3
0
def check_server_status(key, st, host_addr, group):
    """Run a status command against every host and collect the results.

    :param key: path to the SSH private key file.
    :param st: dict-like with ``command_id``, ``run_type`` and ``command``.
    :param host_addr: DataFrame with ``student_id`` and ``ip_address``
        columns (iterated with ``iterrows``).
    :param group: group number as a string; used to offset the student id.
    :return: DataFrame with one row per host/command result.
    """
    df = pd.DataFrame(index=[],
                      columns=[
                          "unixtime", "id", "host", "group", "command",
                          "stdout", "stderr", "step"
                      ])
    # BUG FIX: command_id is passed to run_server_cmd/run_local_cmd below,
    # but its assignment had been commented out, causing a NameError.
    command_id = st['command_id']
    run_type = st['run_type']
    command = st['command']

    for i, host in host_addr.iterrows():
        # Renamed from ``id`` to avoid shadowing the builtin.
        student_id = host['student_id']
        # Offset by the group number so ids are unique across groups.
        student_id = student_id + (int(group) * len(host_addr))
        h = host['ip_address']

        try:
            print("--------------- students_id: " + str(student_id) +
                  " ---------------")
            c = Connection(host=h,
                           user="******",
                           port=22,
                           connect_timeout=2,
                           connect_kwargs={"key_filename": key})
            print("Connected host: " + h)

            backup_dir = settings.SYSTEM_LOG + group + "/status/"
            c.local("mkdir -p " + backup_dir, warn=True)
            print("Created backup_dir locally: " + backup_dir)

            # If any single command fails, log collection cannot proceed.
            if run_type == "run_server_cmd":
                df = run_server_cmd(student_id, df, h, group, c, command,
                                    command_id)
            elif run_type == "run_local_cmd":
                # BUG FIX: substitute into a per-host copy. The original
                # rebound ``command`` itself, so after the first host the
                # token 'ip_address' was gone and every later host reused
                # the first host's IP.
                host_command = re.sub(r'ip_address', h, command)
                print("command : ", end="")
                print(host_command)
                df = run_local_cmd(student_id, df, h, group, c, host_command,
                                   command_id)
            print()
        except Exception:
            # BUG FIX: narrowed from a bare ``except`` (which also swallowed
            # KeyboardInterrupt/SystemExit); record a failure row instead.
            print("{}: {}: {}".format(datetime.now().strftime('%s'), h,
                                      "socket.timeout"))
            df_temp = pd.DataFrame([[
                datetime.now().strftime('%s'), student_id, h, "exception", "",
                "", "socket.timeout", "exception"
            ]])
            df_temp.columns = [
                "unixtime", "team", "host", "group", "command", "stdout",
                "stderr", "step"
            ]
            # NOTE: DataFrame.append was removed in pandas 2.0; when
            # upgrading, switch to pd.concat([df, df_temp], sort=False).
            df = df.append(df_temp, sort=False)
            # BUG FIX: removed the second, unreachable append that followed
            # ``continue`` in the original.
            continue
    return df
예제 #4
0
class MachineConnection():
    """Thin wrapper around a fabric ``Connection`` built from a machine config.

    Commands are executed locally when the configured host equals ``LOCAL``,
    otherwise over SSH.
    """

    def __init__(self, machine):
        """
        :param machine: machine configuration object exposing
            ``host``, ``user``, ``port`` and ``password``.
        """
        self.machine = machine
        self.connection = None
        self._connect()

    def _connect(self):
        """Open the connection (only password authentication for now).

        If no remote machine is configured, the local machine is used
        (see connection_executor).
        """
        self.connection = Connection(
            host=self.machine.host,
            user=self.machine.user,
            port=self.machine.port,
            connect_kwargs={'password': self.machine.password})

    @property
    def status(self):
        """Connection status of the machine (not implemented yet)."""

    @property
    def _is_local(self):
        # Local execution is signalled by the sentinel host value LOCAL.
        return self.connection.host == LOCAL

    def run(self, command):
        """Run *command*, locally or over SSH depending on the host."""
        runner = self.connection.local if self._is_local else self.connection.run
        return runner(command)

    def sudo(self, command):
        """Run *command* with sudo remotely, or plainly when local."""
        runner = self.connection.local if self._is_local else self.connection.sudo
        return runner(command)
예제 #5
0
def get_logs(key, host_addr, group):
    """Collect the log archive and command history from every host.

    :param key: path to the SSH private key file.
    :param host_addr: iterable of rows exposing ``student_id`` and
        ``ip_address``.
    :param group: group number string, used both in the backup path and to
        offset the student id.
    """
    str_date = datetime.now().strftime('%s')
    for i, row in enumerate(host_addr):
        # Globally unique, zero-paddable id string for this student.
        sid = str(row['student_id'] + (int(group) * len(host_addr)))
        h = row['ip_address']

        try:
            print("Start get_logs function")
            c = Connection(host=h,
                           user="******",
                           port=22,
                           connect_timeout=2,
                           connect_kwargs={"key_filename": key})
            print("Success connection host: " + h)

            # Local destination: /root/log/<group>/<id>/<unixtime>
            backup_dir = ("/root/log/" + group + "/" + sid.zfill(3) + "/" +
                          str_date)
            c.local("mkdir -p " + backup_dir, warn=True)
            print("Create backup_dir locally: " + backup_dir)

            # Archive /var/log/script remotely, download it, then clean up.
            c.run("tar czf /root/script.tar.gz -C /var/log/ script", warn=True)
            print("Create script.tar.gz on remote")

            c.get("/root/script.tar.gz", backup_dir + "/script.tar.gz")
            print("Get script.tar.gz on remote")

            c.run("rm -rf /root/script.tar.gz", warn=True)
            print("Delete script.tar.gz on remote")

            c.get("/root/.command_history", backup_dir + "/.command_history")
            print("Get .command_history on remote")

            print("Finish get_logs function")
        except socket.timeout:
            # Skip unreachable hosts; other errors still propagate.
            continue
예제 #6
0
파일: connection.py 프로젝트: fabric/fabric
 def wraps_invoke_run(self):
     """``local`` should delegate to Invoke's run without touching SSH."""
     # NOTE: most of the interesting tests about this are in
     # invoke.runners / invoke.integration.
     cxn = Connection("localhost")
     out = cxn.local("echo foo", hide=True)
     assert out.stdout == "foo\n"
     # meh way of proving it didn't use SSH
     assert not cxn.is_connected
예제 #7
0
class Fabapi(object):
    """Convenience wrapper around a fabric ``Connection`` for one host.

    Provides local/remote command execution and file transfer helpers,
    authenticating with the deploy key from ``settings``.
    """

    def __init__(self, hostip):
        connect_kwargs = {
            'pkey': settings.DEPLOYKEY,
            # 'password':''
        }
        # BUG FIX: the original spread these as ``**connect_kwargs``, which
        # passed an unsupported ``pkey=`` keyword straight to Connection();
        # paramiko options must go through the ``connect_kwargs`` parameter.
        self.c = Connection(host=hostip,
                            user=settings.DEPLOYUSER,
                            port=22,
                            connect_kwargs=connect_kwargs)

    def locald(self, ld):
        """Run command *ld* on the local machine."""
        return self.c.local(ld)

    def remoted(self, rd, sudoif=0):
        """Run command *rd* on the remote host (``sudoif`` currently unused)."""
        # if sudoif == 1:
        #     return sudo(rd, pty=False)
        # else:
        #     return run(rd)
        # NOTE(review): ``shell=False`` is forwarded to invoke's runner,
        # which documents ``shell`` as a path string — confirm the target
        # invoke version accepts a boolean here.
        return self.c.run(rd, pty=False, shell=False)

    def getfile(self, local_dest, remote_dest):
        """Download *remote_dest* from the host to *local_dest*."""
        self.c.get(remote_dest, local_dest)

    def putfile(self, local_dest, remote_dest):
        """Upload *local_dest* to *remote_dest*, mode 0664."""
        self.c.put(local_dest, remote_dest, mode=0o664)

    def isexists(self, rootdir):
        """Return 1 if *rootdir* exists on the remote host, else 0."""
        # BUG FIX: ``run`` returns a Result object, which int() cannot
        # convert; parse the echoed 0/1 from stdout instead.
        result = self.c.run(" [ -e {0} ] && echo 1 || echo 0".format(rootdir),
                            shell=False)
        return int(result.stdout.strip())
예제 #8
0
def do_download(connection: Connection, download: dict, backup_filename: str,
                remote_zip_path: str, remote_zip_md5: str) -> None:
    """Download the remote backup archive locally and verify its md5 sum.

    Controlled by *download*: ``enable`` gates the whole step, ``path`` is
    the local target directory and ``remove`` deletes the remote archive
    afterwards.
    """
    if not download["enable"]:
        return

    print("==> downloading compressed file to local directory")

    # Absolute local destination path for the archive.
    download_path = path.realpath("{}/{}".format(download["path"],
                                                 backup_filename))

    # Pull the compressed archive from the remote host.
    connection.get(remote_zip_path, download_path)

    # md5 of the local copy, compared against the remote checksum.
    md5_result = connection.local("md5sum {}".format(download_path),
                                  hide=True)
    local_zip_md5 = md5_result.stdout.split(" ")[0]

    if remote_zip_md5 == local_zip_md5:
        print("==> sum is match")
    else:
        print("==> sum is not match")

    # Optionally clean the archive off the remote host.
    if download["remove"]:
        print("==> removing compressed file")

        connection.run("rm -f {}".format(remote_zip_path))
예제 #9
0
 def wraps_invoke_run(self):
     """``local`` should delegate to Invoke's run without touching SSH."""
     # NOTE: most of the interesting tests about this are in
     # invoke.runners / invoke.integration.
     cxn = Connection('localhost')
     res = cxn.local('echo foo', hide=True)
     eq_(res.stdout, 'foo\n')
     # meh way of proving it didn't use SSH
     assert not cxn.is_connected
예제 #10
0
 def wraps_invoke_run(self):
     """Prove ``local`` runs via Invoke, never opening an SSH session."""
     # NOTE: most of the interesting tests about this are in
     # invoke.runners / invoke.integration.
     cxn = Connection("localhost")
     result = cxn.local("echo foo", hide=True)
     # meh way of proving it didn't use SSH
     assert not cxn.is_connected
     assert result.stdout == "foo\n"
예제 #11
0
def build(c):
    """Pack the ``www`` tree into dist\\<_TAR_FILE> and deploy it.

    NOTE(review): the incoming task context ``c`` is immediately shadowed by
    a new hard-coded Connection, and the ``cd`` below runs in its own shell
    so it cannot affect later commands — both look unintentional; confirm.
    """
    # includes = ['static', 'templates', 'transwarp', 'favicon.ico', '*.py']
    # excludes = ['test', '.*', '*.pyc', "*.pyo"]
    c = Connection('65.49.215.135', user='******', port=27723, connect_kwargs={'password': '******'})
    # result = c.run('ls', hide=True)
    # msg = "Ran {0.command!r} on {0.connection.host}, got stdout:\n{0.stdout}"
    # print(msg.format(result))
    c.local('cd %s' % os.path.join(os.path.abspath('.'), 'www'))
    # Windows-style delete of the previous archive (backslash path).
    c.local('del dist\\%s' % _TAR_FILE)
    tar = tarfile.open("dist\\%s" % _TAR_FILE, "w:gz")
    for root, _dir, files in os.walk("www/"):  # pack the www folder
        for f in files:
            if not (('.pyc' in f) or ('.pyo' in f)):  # skip compiled files produced while developing
                fullpath = os.path.join(root, f)
                tar.add(fullpath)
    tar.close()
    deploy(c)
예제 #12
0
class FtpServerHelperStep2:
    """Compress a packages folder and rsync the archive to the remote server."""

    def __init__(self, packages_folder_path):
        """
        :param packages_folder_path: path of the folder holding the packages.
        """
        self.packages_folder_path = packages_folder_path
        self.packages_folder_info = PackagesFolderInfo(packages_folder_path)
        # Backup directory for this project's archives.
        self.tar_path = path.join(data_sync_backup_path(),
                                  self.packages_folder_info.project_code)
        # The archive base name embeds the package count for later checking.
        base_name = path.join(self.tar_path,
                              self.packages_folder_info.packages_folder_name)
        self.tar_name = "{}_packagecount{}".format(
            base_name, self.packages_folder_info.packages_count)

        # Connection instance used to execute local shell commands.
        self.local_connection = Connection("localhost")

    def compress_folder(self):
        """Create ``<tar_name>.tar.gz`` from the packages folder."""
        # Create the archive directory if it does not exist yet.
        if not path.exists(self.tar_path):
            os.makedirs(self.tar_path, exist_ok=True)

        command = "tar -zcf {}.tar.gz {} ".format(self.tar_name,
                                                  self.packages_folder_path)
        cli.info(command)
        self.local_connection.local(command)

    def upload_2_weiruan(self):
        """Rsync the archive to the production server.

        The SSH endpoint ``SurfingtechProductionServer`` is expected to be
        configured in ~/.ssh/config rather than hard-coded here.
        """
        archive = path.join(data_sync_backup_path(),
                            self.packages_folder_info.project_code,
                            self.tar_name + ".tar.gz")
        command = "rsync -avz --progress {} SurfingtechProductionServer:{}".format(
            archive, "/SurfingDataDisk/ftpserver_synchronize_folder")
        cli.info(command)

        self.local_connection.local(command)
예제 #13
0
def generate_magento_build_folder():
    """Create one Magento 2 build folder per PHP version from the demo template."""
    local_con = Connection('localhost')
    for php_version in get_php_versions():
        docker_filename = "m2_apache2-php%s" % php_version

        # Rebuild the version folder from a fresh copy of the demo template.
        src_dir_path = os.path.abspath("build_folder_m2/demo")
        dest_dir_path = os.path.abspath("build_folder_m2/%s" % docker_filename)
        local_con.local('rm -rf {0} && mkdir {0}'.format(dest_dir_path))
        local_con.local('cp -r %s/* %s' % (src_dir_path, dest_dir_path))

        # Drop the version-specific Dockerfile into the build folder.
        src_file_path = os.path.abspath("all_m2/%s" % docker_filename)
        dest_file_path = os.path.abspath(
            "build_folder_m2/{0}/{0}".format(docker_filename))
        local_con.local('cp -r %s %s' % (src_file_path, dest_file_path))
예제 #14
0
def generate_magento_docker_compose_fodlers():
    """Create one docker-compose folder per PHP version from the demo template."""
    local_con = Connection('localhost')
    for php_version in get_php_versions():
        docker_compose_filename = "m2_apache2-php%s" % php_version
        env_folder_name = 'Apache2-Mysql5.7-PHP%s' % php_version

        # Rebuild the environment folder from a fresh copy of the template.
        src_dir_path = os.path.abspath("docker_compose_folder/demo")
        dest_dir_path = os.path.abspath(
            "docker_compose_folder/%s" % env_folder_name)
        local_con.local('rm -rf {0} && mkdir {0}'.format(dest_dir_path))
        local_con.local('cp -r %s/* %s' % (src_dir_path, dest_dir_path))

        # Install the version-specific compose file as docker-compose.yml.
        src_file_path = os.path.abspath(
            "all_docker_compose_file/%s" % docker_compose_filename)
        dest_file_path = os.path.abspath("docker_compose_folder/{0}/{1}".format(
            env_folder_name, 'docker-compose.yml'))
        local_con.local('cp -r %s %s' % (src_file_path, dest_file_path))
예제 #15
0
def deploy():
    '''Deploy the www code to the remote server.

    Packs the local ``www`` tree, uploads it, unpacks it into a timestamped
    release directory, repoints the ``www`` symlink, fixes ownership, and
    restarts the services. Relies on module-level connection settings
    (ip, remote_user, remote_pass, remote_su_pass, tar, remote_tmp_tar,
    remote_wwwroot).
    '''
    includes = ['*.py', 'favicon.ico', 'static', 'templates', '*.sql']
    excludes = ['*test*', '.*', '*.swp']
    # Password used by sudo on the remote host.
    config = Config(overrides={'sudo': {'password': remote_su_pass}})
    # Plain-text password login for the SSH connection.
    conn = Connection(ip,
                      user=remote_user,
                      config=config,
                      connect_kwargs={
                          "allow_agent": False,
                          "password": remote_pass
                      })
    # Compress the www directory, overwriting dist/<tar>.
    # BUG FIX: the original was missing a comma ("'&&' 'tar'"), silently
    # concatenating the two literals into '&&tar'.
    cmd = ['cd www', '&&', 'tar', '-czvf', '../dist/%s' % tar]
    cmd.extend(['--exclude="%s"' % ex for ex in excludes])
    cmd.extend(includes)
    conn.local(' '.join(cmd))

    newdir = 'www-%s' % datetime.now().strftime('%y-%m-%d_%H.%M.%S')
    # Remove any stale archive on the remote host, then upload the new one.
    conn.run('rm -f %s' % remote_tmp_tar)
    conn.put('dist/%s' % tar, remote_tmp_tar)
    # Unpack into a fresh timestamped release directory.
    conn.sudo("mkdir {}/{}".format(remote_wwwroot, newdir))
    conn.sudo("tar -xzvf {} -C {}/{}".format(remote_tmp_tar, remote_wwwroot,
                                             newdir))
    # Repoint the live symlink at the new release.
    conn.sudo("rm -f {}/www".format(remote_wwwroot))
    conn.sudo("ln -s {}/{} {}/www".format(remote_wwwroot, newdir,
                                          remote_wwwroot))
    # Hand ownership of the release to the web user.
    conn.sudo("chown -h www-data:www-data {}/www".format(remote_wwwroot))
    conn.sudo("chown -H -R www-data:www-data {}/www".format(remote_wwwroot))
    # Restart the awesome service and reload nginx.
    conn.sudo("supervisorctl stop awesome")
    conn.sudo("supervisorctl start awesome")
    conn.sudo("systemctl reload nginx")
    conn.close()
예제 #16
0
 def mixed_use_of_local_and_run(self):
     """
     Run command truly locally, and over SSH via localhost
     """
     cxn = Connection('localhost')
     local_result = cxn.local('echo foo', hide=True)
     eq_(local_result.stdout, 'foo\n')
     # meh way of proving it didn't use SSH yet
     assert not cxn.is_connected
     ssh_result = cxn.run('echo foo', hide=True)
     # NOW it's using SSH
     assert cxn.is_connected
     eq_(ssh_result.stdout, 'foo\n')
예제 #17
0
파일: connection.py 프로젝트: fabric/fabric
 def mixed_use_of_local_and_run(self):
     """
     Run command truly locally, and over SSH via localhost
     """
     cxn = Connection("localhost")
     local_result = cxn.local("echo foo", hide=True)
     assert local_result.stdout == "foo\n"
     # meh way of proving it didn't use SSH yet
     assert not cxn.is_connected
     ssh_result = cxn.run("echo foo", hide=True)
     # NOW it's using SSH
     assert cxn.is_connected
     assert ssh_result.stdout == "foo\n"
예제 #18
0
 def mixed_use_of_local_and_run(self):
     """
     Run command truly locally, and over SSH via localhost
     """
     cxn = Connection("localhost")
     out = cxn.local("echo foo", hide=True)
     # meh way of proving it didn't use SSH yet
     assert not cxn.is_connected
     assert out.stdout == "foo\n"
     out = cxn.run("echo foo", hide=True)
     # NOW it's using SSH
     assert cxn.is_connected
     assert out.stdout == "foo\n"
예제 #19
0
def restore2local():
    '''
    Restore a database backup into the local database.

    Lists the available backup archives in ``backup_dir``, asks the user to
    pick one, recreates the database, and loads the chosen dump.
    '''
    fs = os.listdir(backup_dir)
    files = [
        f for f in fs if f.startswith('backup-') and f.endswith('.sql.tar.gz')
    ]
    files.sort(reverse=True)
    if len(files) == 0:
        print('No backup files found.')
        return
    print('Found %s backup files:' % len(files))
    print('==================================================')
    for n, f in enumerate(files):
        print('%s: %s' % (n, f))
    print('==================================================')
    try:
        num = int(input('Restore file: '))
    except ValueError:
        print('Invalid file number.')
        return
    # BUG FIX: guard against out-of-range numbers, which previously raised
    # an unhandled IndexError (negative numbers also silently picked a file
    # from the end of the list).
    if not 0 <= num < len(files):
        print('Invalid file number.')
        return
    restore_file = files[num]
    yn = input('Restore file %s: %s? y/N ' % (num, restore_file))
    if yn != 'y' and yn != 'Y':
        print('Restore cancelled.')
        return
    print('Start restore to local database...')
    # Recreate the database and grant the app account its privileges.
    # NOTE(review): the grant uses local_sql_user/local_sql_pass while the
    # mysql client below authenticates as local_sql_Suser/local_sql_Spass —
    # presumably the admin account; confirm both sets of globals exist.
    sqls = [
        'drop database if exists awesome;', 'create database awesome;',
        'grant select, insert, update, delete on %s.* to \'%s\'@\'localhost\' identified by \'%s\';'
        % (local_sql_db, local_sql_user, local_sql_pass)
    ]
    conn = Connection(ip)
    for sql in sqls:
        conn.local(r'mysql -u{} -p{} -e "{}"'.format(local_sql_Suser,
                                                     local_sql_Spass, sql))
    # Unpack the archive, feed the .sql dump to mysql, then clean up
    # (restore_file[:-7] strips the '.tar.gz' suffix).
    conn.local('tar zxvf {1:}/{0:} -C {1:}'.format(restore_file, backup_dir))
    conn.local(r'mysql -u{} -p{} {} < {}/{}'.format(local_sql_Suser,
                                                    local_sql_Spass,
                                                    local_sql_db, backup_dir,
                                                    restore_file[:-7]))
    conn.local('rm -f {}/{}'.format(backup_dir, restore_file[:-7]))
예제 #20
0
def pack(c):
    """Build dist/<tar_file> from the project's executable files."""
    conn = Connection(host)
    conn.local('mkdir -p ./dist/')
    conn.local('rm -f ./dist/{}'.format(tar_file))
    # Pack executable files, excluding tooling, tests and caches.
    includes = ['*.py']
    excludes = ['fabfile.py', 'test.py', '.*', '*.pyc']
    parts = ['tar', '-czvf', './dist/{}'.format(tar_file)]
    parts += ['--exclude="{}"'.format(ex) for ex in excludes]
    parts += includes
    conn.local(' '.join(parts))
예제 #21
0
def local_install():
    """Install Prometheus under run-tools/ from a downloaded release tarball."""
    ctx = Connection(local_ip)
    version = '2.4.1'
    # BUG FIX (idiom): the original named this local variable ``os``,
    # shadowing the stdlib ``os`` module inside the function.
    os_name = 'darwin'
    arch = 'amd64'
    name = 'prometheus-{version}.{os}-{arch}'.format(version=version,
                                                     os=os_name,
                                                     arch=arch)

    download = 'https://github.com/prometheus/prometheus/releases/download/v{0}/{1}.tar.gz'.format(
        version, name)

    ctx.local('rm -rf run-tools/prometheus')
    ctx.local('mkdir -p run-tools/download')
    # TODO check whether the tarball has already been downloaded
    # ctx.local('wget {download} -P run-tools/download'.format(download=download))
    ctx.local('tar -zxvf run-tools/download/{name}.tar.gz -C run-tools'.format(
        name=name))
    ctx.local('mv run-tools/{name} run-tools/prometheus'.format(name=name))
예제 #22
0
def main():
    """Provision EC2 test machines, run the test script on each, and report.

    Relies on module-level configuration (KEYFILE, KEYNAME, PROFILE, LOGDIR,
    SUBNET_NAME, SECURITY_GROUP_NAME, BOULDER_AMI, SENTINEL, cl_args, Status)
    and on helper functions defined elsewhere in this file.
    """
    # Fabric library controlled through global env parameters
    fab_config = Config(
        overrides={
            "connect_kwargs": {
                "key_filename":
                [KEYFILE],  # https://github.com/fabric/fabric/issues/2007
            },
            "run": {
                "echo": True,
                "pty": True,
            },
            "timeouts": {
                "connect": 10,
            },
        })
    # no network connection, so don't worry about closing this one.
    local_cxn = Connection('localhost', config=fab_config)

    # Set up local copy of git repo
    #-------------------------------------------------------------------------------
    print("Making local dir for test repo and logs: %s" % LOGDIR)
    local_cxn.local('mkdir %s' % LOGDIR)

    # figure out what git object to test and locally create it in LOGDIR
    # '~' is the CLI sentinel for "not specified".
    print("Making local git repo")
    try:
        if cl_args.pull_request != '~':
            print('Testing PR %s ' % cl_args.pull_request,
                  "MERGING into master" if cl_args.merge_master else "")
            local_git_PR(local_cxn, cl_args.repo, cl_args.pull_request,
                         cl_args.merge_master)
        elif cl_args.branch != '~':
            print('Testing branch %s of %s' % (cl_args.branch, cl_args.repo))
            local_git_branch(local_cxn, cl_args.repo, cl_args.branch)
        else:
            print('Testing master of %s' % cl_args.repo)
            local_git_clone(local_cxn, cl_args.repo)
    # NOTE(review): deliberately broad catch (also KeyboardInterrupt) so any
    # git problem aborts the run with a traceback.
    except BaseException:
        print("FAIL: trouble with git repo")
        traceback.print_exc()
        exit()

    # Set up EC2 instances
    #-------------------------------------------------------------------------------
    configdata = yaml.safe_load(open(cl_args.config_file, 'r'))
    targetlist = configdata['targets']
    print('Testing against these images: [%d total]' % len(targetlist))
    for target in targetlist:
        print(target['ami'], target['name'])

    print("Connecting to EC2 using\n profile %s\n keyname %s\n keyfile %s" %
          (PROFILE, KEYNAME, KEYFILE))
    aws_session = boto3.session.Session(profile_name=PROFILE)
    ec2_client = aws_session.resource('ec2')

    # Pick the first usable subnet; the for/else fires when none matches.
    print("Determining Subnet")
    for subnet in ec2_client.subnets.all():
        if should_use_subnet(subnet):
            subnet_id = subnet.id
            vpc_id = subnet.vpc.id
            break
    else:
        print("No usable subnet exists!")
        print(
            "Please create a VPC with a subnet named {0}".format(SUBNET_NAME))
        print(
            "that maps public IPv4 addresses to instances launched in the subnet."
        )
        sys.exit(1)

    # Reuse the security group if present, otherwise create it and wait for
    # it to propagate.
    print("Making Security Group")
    vpc = ec2_client.Vpc(vpc_id)
    sg_exists = False
    for sg in vpc.security_groups.all():
        if sg.group_name == SECURITY_GROUP_NAME:
            security_group_id = sg.id
            sg_exists = True
            print("  %s already exists" % SECURITY_GROUP_NAME)
    if not sg_exists:
        security_group_id = make_security_group(vpc).id
        time.sleep(30)

    # Look for an already-running boulder server instance by tag.
    boulder_preexists = False
    boulder_servers = ec2_client.instances.filter(
        Filters=[{
            'Name': 'tag:Name',
            'Values': ['le-boulderserver']
        }, {
            'Name': 'instance-state-name',
            'Values': ['running']
        }])

    boulder_server = next(iter(boulder_servers), None)

    print("Requesting Instances...")
    if boulder_server:
        print("Found existing boulder server:", boulder_server)
        boulder_preexists = True
    else:
        print("Can't find a boulder server, starting one...")
        boulder_server = make_instance(
            ec2_client,
            'le-boulderserver',
            BOULDER_AMI,
            KEYNAME,
            machine_type='t2.micro',
            #machine_type='t2.medium',
            security_group_id=security_group_id,
            subnet_id=subnet_id)

    instances = []
    try:
        if not cl_args.boulderonly:
            print("Creating instances: ", end="")
            for target in targetlist:
                instances.append(
                    create_client_instance(ec2_client, target,
                                           security_group_id, subnet_id))
            print()

        # Configure and launch boulder server
        #-------------------------------------------------------------------------------
        print("Waiting on Boulder Server")
        boulder_server = block_until_instance_ready(boulder_server)
        print(" server %s" % boulder_server)

        # host_string defines the ssh user and host for connection
        host_string = "ubuntu@%s" % boulder_server.public_ip_address
        print("Boulder Server at (SSH):", host_string)
        if not boulder_preexists:
            print("Configuring and Launching Boulder")
            with Connection(host_string, config=fab_config) as boulder_cxn:
                config_and_launch_boulder(boulder_cxn, boulder_server)
                # blocking often unnecessary, but cheap EC2 VMs can get very slow
                block_until_http_ready('http://%s:4000' %
                                       boulder_server.public_ip_address,
                                       wait_time=10,
                                       timeout=500)

        # Clients reach boulder via the EC2-internal (private) address.
        boulder_url = "http://%s:4000/directory" % boulder_server.private_ip_address
        print("Boulder Server at (public ip): http://%s:4000/directory" %
              boulder_server.public_ip_address)
        print("Boulder Server at (EC2 private ip): %s" % boulder_url)

        if cl_args.boulderonly:
            sys.exit(0)

        # Install and launch client scripts in parallel
        #-------------------------------------------------------------------------------
        print("Uploading and running test script in parallel: %s" %
              cl_args.test_script)
        print("Output routed to log files in %s" % LOGDIR)
        # (Advice: always use Manager.Queue, never regular multiprocessing.Queue
        # the latter has implementation flaws that deadlock it in some circumstances)
        manager = Manager()
        outqueue = manager.Queue()
        inqueue = manager.Queue()

        # launch as many processes as clients to test
        num_processes = len(targetlist)
        jobs = []  #keep a reference to current procs

        # initiate process execution
        for i in range(num_processes):
            p = mp.Process(target=test_client_process,
                           args=(fab_config, inqueue, outqueue, boulder_url))
            jobs.append(p)
            p.daemon = True  # kills subprocesses if parent is killed
            p.start()

        # fill up work queue
        for ii, target in enumerate(targetlist):
            inqueue.put((ii, instances[ii].id, target))

        # add SENTINELs to end client processes
        for i in range(num_processes):
            inqueue.put(SENTINEL)
        print('Waiting on client processes', end='')
        for p in jobs:
            while p.is_alive():
                p.join(5 * 60)
                # Regularly print output to keep Travis happy
                print('.', end='')
                sys.stdout.flush()
        print()
        # add SENTINEL to output queue
        outqueue.put(SENTINEL)

        # clean up
        local_repo_clean(local_cxn)

        # print and save summary results
        results_file = open(LOGDIR + '/results', 'w')
        outputs = [outq for outq in iter(outqueue.get, SENTINEL)]
        outputs.sort(key=lambda x: x[0])
        failed = False
        for outq in outputs:
            ii, target, status = outq
            if status == Status.FAIL:
                failed = True
            print('%d %s %s' % (ii, target['name'], status))
            results_file.write('%d %s %s\n' % (ii, target['name'], status))
        # A missing output means a worker died before reporting.
        if len(outputs) != num_processes:
            failed = True
            failure_message = 'FAILURE: Some target machines failed to run and were not tested. ' +\
                'Tests should be rerun.'
            print(failure_message)
            results_file.write(failure_message + '\n')
        results_file.close()

        if failed:
            sys.exit(1)

    finally:
        # Always tear down the EC2 resources, even on failure.
        cleanup(cl_args, instances, targetlist, boulder_server)
예제 #23
0
파일: fabfile.py 프로젝트: anwen/anwen
def backup(c):
    """Backup data from the aw mongo server and download it locally.

    NOTE(review): the incoming task context ``c`` is replaced by a
    hard-coded Connection('aw'); confirm this is intentional.
    """
    # BUG FIX: the original docstring was placed after the first statement,
    # where it was a discarded string expression rather than the function's
    # docstring; moved it to the top.
    c = Connection('aw')
    # Export the database to yaml and archive it, plus the markdown shares
    # and uploaded images, on the remote host.
    with c.cd('/var/www/anwen/db'):
        c.run('. ~/.zshrc && python3 db_in_out.py -o')
        c.run('tar czf aw_yaml.tar.gz data')
    with c.cd('/var/www/anwen/docs/shares'):
        c.run('tar czf aw_md.tar.gz *.md')
    with c.cd('/var/www/anwen/static/upload/'):
        c.run('tar czf upload.tar.gz img')
    print('download yaml:')
    with CD(os.path.join(os.getcwd(), 'db/')):
        c.get('/var/www/anwen/db/aw_yaml.tar.gz', 'aw_yaml.tar.gz')
        c.local('tar zxf aw_yaml.tar.gz')
        c.local('rm aw_yaml.tar.gz')
    print('download md:')
    with CD(os.path.join(os.getcwd(), 'docs/shares/')):
        c.get('/var/www/anwen/docs/shares/aw_md.tar.gz', 'aw_md.tar.gz')
        c.local('tar zxf aw_md.tar.gz')
        c.local('rm aw_md.tar.gz')
    print('download img:')
    # The image download below is intentionally disabled by this early
    # return; remove the return to re-enable it.
    return
    with CD(os.path.join(os.getcwd(), 'static/upload/')):
        c.get('/var/www/anwen/static/upload/upload.tar.gz', 'upload.tar.gz')
        c.local('tar zxf upload.tar.gz img')
        c.local('rm upload.tar.gz')
예제 #24
0
def main():
    """Test the requested git ref against every EC2 target AMI in parallel.

    Workflow: build a local working copy of the repo (PR / branch / current
    checkout), launch one EC2 instance per target AMI, fan the test script
    out over one worker process per target, then write a pass/fail summary
    and exit non-zero on any failure.

    Depends on module globals: ``cl_args``, ``KEYFILE``, ``PROFILE``,
    ``KEYNAME``, ``SUBNET_NAME``, ``SECURITY_GROUP_NAME``, ``SENTINEL``,
    ``Status`` and the git/EC2 helper functions.
    """
    # Fabric library controlled through global env parameters
    fab_config = Config(
        overrides={
            "connect_kwargs": {
                "key_filename":
                [KEYFILE],  # https://github.com/fabric/fabric/issues/2007
            },
            "run": {
                "echo": True,
                "pty": True,
            },
            "timeouts": {
                "connect": 10,
            },
        })
    # no network connection, so don't worry about closing this one.
    local_cxn = Connection('localhost', config=fab_config)

    # Set up local copy of git repo
    #-------------------------------------------------------------------------------
    log_dir = "letest-%d" % int(
        time.time())  #points to logging / working directory
    print("Making local dir for test repo and logs: %s" % log_dir)
    local_cxn.local('mkdir %s' % log_dir)

    try:
        # figure out what git object to test and locally create it in log_dir
        print("Making local git repo")
        if cl_args.pull_request != '~':
            print('Testing PR %s ' % cl_args.pull_request,
                  "MERGING into master" if cl_args.merge_master else "")
            local_git_PR(local_cxn, cl_args.repo, cl_args.pull_request,
                         log_dir, cl_args.merge_master)
        elif cl_args.branch != '~':
            print('Testing branch %s of %s' % (cl_args.branch, cl_args.repo))
            local_git_branch(local_cxn, cl_args.repo, cl_args.branch, log_dir)
        else:
            print('Testing current branch of %s' % cl_args.repo, log_dir)
            local_git_clone(local_cxn, cl_args.repo, log_dir)
    except BaseException:
        print("FAIL: trouble with git repo")
        traceback.print_exc()
        sys.exit(1)  # consistency fix: use sys.exit like the rest of main()

    # Set up EC2 instances
    #-------------------------------------------------------------------------------
    # Fix: close the config file instead of leaking the handle.
    with open(cl_args.config_file, 'r') as config_fd:
        configdata = yaml.safe_load(config_fd)
    targetlist = configdata['targets']
    print('Testing against these images: [%d total]' % len(targetlist))
    for target in targetlist:
        print(target['ami'], target['name'])

    print("Connecting to EC2 using\n profile %s\n keyname %s\n keyfile %s" %
          (PROFILE, KEYNAME, KEYFILE))
    aws_session = boto3.session.Session(profile_name=PROFILE)
    ec2_client = aws_session.resource('ec2')

    print("Determining Subnet")
    for subnet in ec2_client.subnets.all():
        if should_use_subnet(subnet):
            subnet_id = subnet.id
            vpc_id = subnet.vpc.id
            break
    else:
        # for/else: no subnet matched the predicate
        print("No usable subnet exists!")
        print(
            "Please create a VPC with a subnet named {0}".format(SUBNET_NAME))
        print(
            "that maps public IPv4 addresses to instances launched in the subnet."
        )
        sys.exit(1)

    print("Making Security Group")
    vpc = ec2_client.Vpc(vpc_id)
    sg_exists = False
    for sg in vpc.security_groups.all():
        if sg.group_name == SECURITY_GROUP_NAME:
            security_group_id = sg.id
            sg_exists = True
            print("  %s already exists" % SECURITY_GROUP_NAME)
    if not sg_exists:
        security_group_id = make_security_group(vpc).id
        # give AWS time to propagate the freshly created group
        time.sleep(30)

    instances = []
    try:
        print("Creating instances: ", end="")
        # If we want to preserve instances, do not have them self-destruct.
        self_destruct = not cl_args.saveinstances
        for target in targetlist:
            instances.append(
                create_client_instance(ec2_client, target, security_group_id,
                                       subnet_id, self_destruct))
        print()

        # Install and launch client scripts in parallel
        #-------------------------------------------------------------------------------
        print("Uploading and running test script in parallel: %s" %
              cl_args.test_script)
        print("Output routed to log files in %s" % log_dir)
        # (Advice: always use Manager.Queue, never regular multiprocessing.Queue
        # the latter has implementation flaws that deadlock it in some circumstances)
        manager = Manager()
        outqueue = manager.Queue()
        inqueue = manager.Queue()

        # launch as many processes as clients to test
        num_processes = len(targetlist)
        jobs = []  #keep a reference to current procs

        # initiate process execution
        client_process_args = (fab_config, inqueue, outqueue, log_dir)
        for i in range(num_processes):
            p = mp.Process(target=test_client_process,
                           args=client_process_args)
            jobs.append(p)
            p.daemon = True  # kills subprocesses if parent is killed
            p.start()

        # fill up work queue
        for ii, target in enumerate(targetlist):
            inqueue.put((ii, instances[ii].id, target))

        # add SENTINELs to end client processes
        for i in range(num_processes):
            inqueue.put(SENTINEL)
        print('Waiting on client processes', end='')
        for p in jobs:
            while p.is_alive():
                p.join(5 * 60)
                # Regularly print output to keep Travis happy
                print('.', end='')
                sys.stdout.flush()
        print()
        # add SENTINEL to output queue
        outqueue.put(SENTINEL)

        # clean up
        local_repo_clean(local_cxn, log_dir)

        # print and save summary results
        failed = False
        # Fix: context manager guarantees the results file is closed even
        # when a write raises.
        with open(log_dir + '/results', 'w') as results_file:
            outputs = list(iter(outqueue.get, SENTINEL))
            outputs.sort(key=lambda x: x[0])
            for outq in outputs:
                ii, target, status = outq
                if status == Status.FAIL:
                    failed = True
                print('%d %s %s' % (ii, target['name'], status))
                results_file.write('%d %s %s\n' % (ii, target['name'], status))
            if len(outputs) != num_processes:
                # fewer results than workers means some machines never ran
                failed = True
                failure_message = 'FAILURE: Some target machines failed to run and were not tested. ' +\
                    'Tests should be rerun.'
                print(failure_message)
                results_file.write(failure_message + '\n')

        if failed:
            sys.exit(1)

    finally:
        cleanup(cl_args, instances, targetlist, log_dir)
예제 #25
0
class SSH(object):
    """
    A thin wrapper around Fabric:
        1. Hides parameters that are not currently needed.
        2. Sets debug-friendly defaults.
        3. Adds extra docstrings.
    """
    def __init__(
        self,
        host: str,
        port: int,
        username: str,
        password: Optional[str] = None,
        key_file_obj: Optional[IO] = None,
        key_file_path: Optional[Path] = None,
        key_file_passphrase: Optional[str] = None,
    ):
        """
        Exactly one credential must be supplied: password, key_file_obj,
        or key_file_path (checked in that priority order: obj > path >
        password).

        Examples:
        ```python3
        # 1. Log in to the remote host with a password
        ssh_con = SSH("192.168.1.xxx", 22, username="******", password="******")

        # 2. Log in with a private key (key has no passphrase)
        ## 2.1 by key file path
        ssh_con = SSH("192.168.1.xxx", 22, username="******", key_file_path=Path("~/.ssh/id_rsa"))
        ## 2.2 by a file-like object holding the key
        ssh_con = SSH("192.168.1.xxx", 22, username="******", key_file_obj=Path("~/.ssh/id_rsa").open(encoding='utf-8'))
        ssh_con = SSH("192.168.1.xxx", 22, username="******", key_file_obj=StringIO("<private-key-content>"))
        ```
        """
        connect_kwargs = dict()
        if key_file_obj is not None:
            # NOTE(review): only RSA keys are supported on this path —
            # confirm that is acceptable.
            private_key = paramiko.RSAKey.from_private_key(
                key_file_obj, key_file_passphrase)
            connect_kwargs['pkey'] = private_key
        elif key_file_path is not None:
            connect_kwargs = {
                "key_filename": str(key_file_path.resolve()),
                "passphrase": key_file_passphrase
            }
        elif password is not None:
            connect_kwargs['password'] = password
        else:
            raise KeyError("must given password/pkey/private_key")

        self.conn = Connection(host=host,
                               port=port,
                               user=username,
                               connect_kwargs=connect_kwargs)

    def open(self):
        """Open the connection.
        run/put/get open the connection automatically, but cd does not —
        cd only mutates the local session state.
        """
        return self.conn.open()

    def close(self):
        """Close the connection."""
        return self.conn.close()

    @property
    def is_connected(self):
        # Delegates to the underlying fabric Connection.
        return self.conn.is_connected

    def run(self, cmd: str, warn=False, hide=False, echo=True, **kwargs):
        """
        Run a command on the remote host.

        Examples:
        ```python3
        # 1. Run a command, echoing it and its output; raises on failure.
        ssh_con.run("ls -al")
        # 2. Only emit a warning on failure (does not interrupt the
        #    program), so the error case can be handled manually.
        result = ssh_con.run("ls -al", warn=True)
        if result.return_code != 0:  # the command failed
            # handle the failure

        # 3. Pull a docker image; only inspect its output when it fails.
        result = ssh_con.run("docker pull xxxx", hide=True, warn=True)
        if result.return_code != 0:  # run failed
            logger.error(result.stdout)  # stdout usually contains stderr here
            # then decide whether to raise
        ```

        ==================
        WARNING: run/sudo do NOT remember directory changes made by `cd`
        commands! To change self.cwd (the current working directory) you
        must use self.cd(); see that method's docstring.

        Official docs: http://docs.pyinvoke.org/en/0.12.1/api/runners.html#invoke.runners.Runner.run
        :param cmd: command string
        :param warn: on abnormal exit, emit a warning instead of raising
        :param hide: hide the command's output streams (stdout/stderr)
        :param echo: echo the command being run (recommended; great for debugging)
        :param shell: shell used to execute the command
        :param encoding: character encoding
        :return: a Result object whose main attributes are:
            command: the executed command
            ok: A boolean equivalent to exited == 0.
            return_code: the command's exit code
            stdout: the command's standard output as a multi-line string
                (remote execution may not distinguish stdout from stderr,
                in which case stdout contains stderr)
        """
        return self.conn.run(command=cmd,
                             warn=warn,
                             hide=hide,
                             echo=echo,
                             **kwargs)

    def sudo(self, command, **kwargs):
        """Run a command with sudo privileges.
        If a password was configured it is used automatically; otherwise
        the user is prompted on the command line (clearly undesirable for
        unattended operations).

        WARNING: run/sudo do NOT remember directory changes made by `cd`
        commands! To change self.cwd use self.cd(); see its docstring.

        """
        return self.conn.sudo(command=command, **kwargs)

    def local(self, *args, **kwargs):
        """Run a command on the local machine."""
        return self.conn.local(*args, **kwargs)

    def cd(self, path: Union[Path, str]):
        """change dir
        self.run()/self.sudo() do not record working-directory changes made
        by `cd` commands; to run several statements under a given path you
        must use self.cd()
        (or manually prefix every run with `cd /home/xxx/xxx`, which is
        obviously not recommended).

        Important: this works like open(xxx) — it needs a `with` context:
        ```
        with ssh_conn.cd("/tmp"):
            # do some thing
        ```
        Outside the with-block the cd no longer applies.

        ---
        Effectively it prefixes every run/sudo inside the with-block with
        `cd xxx`.
        """
        return self.conn.cd(str(path))

    @property
    def cwd(self):
        """currently work dir
        Defaults to the empty string, which means $HOME.
        """
        return self.conn.cwd

    def get(self,
            remote_file_path: Union[Path, str],
            local: Union[Path, IO] = None,
            preserve_mode: bool = True,
            mkdirs=False):
        """
        Fetch a file from the remote host to the local machine.
        :param remote_file_path: path of the file on the remote host
            (`~` is NOT expanded — absolute paths recommended!)
        :param local: local destination path / file-like obj. If omitted,
            the file is saved under the current working directory (os.getcwd())
        :param preserve_mode: keep the file's mode bits (read/write/execute),
            default True
        :param mkdirs: create missing intermediate directories automatically
        :return: a Result object
        """
        if isinstance(local, Path):
            local_path_parent = local.parent
            if local_path_parent.exists() is False:
                if mkdirs:
                    local_path_parent.mkdir(parents=True)
                else:
                    raise FileNotFoundError(
                        "directory '{}' not exist!".format(local_path_parent))

        return self.conn.get(
            remote=str(remote_file_path),
            local=local,
            preserve_mode=preserve_mode,
        )

    def put(self,
            local: Union[Path, IO],
            remote_file_path: Union[Path, str] = Path("."),
            preserve_mode: bool = True,
            mkdirs=False):
        """
        Transfer a file from the local machine to the remote host.

        :param local: local file path / file-like obj
        :param remote_file_path: destination path on the remote host
            (`~` is NOT expanded — absolute paths recommended!)
            Defaults to the remote home directory.
        :param preserve_mode: keep the file's mode bits (read/write/execute),
            default True
        :param mkdirs: create missing intermediate directories automatically
        :return: a Result object (which lacks attributes such as ok...)
        """
        if mkdirs:
            parent = Path(remote_file_path).parent
            self.conn.run("mkdir -p '{}'".format(parent))

        return self.conn.put(local=local,
                             remote=Path(remote_file_path).as_posix(),
                             preserve_mode=preserve_mode)

    def put_dir(self,
                local_dir_path: Path,
                remote_path: Union[Path, str] = Path("."),
                preserve_mode: bool = True,
                mkdirs=False):
        """
        Transfer a directory from the local machine to the remote host
        (tars it locally, uploads, then untars remotely).

        :param local_dir_path: local directory path
        :param remote_path: path of an EXISTING directory on the remote host
            (`~` is NOT expanded — absolute paths recommended!)
            Defaults to the remote home directory.
        :param preserve_mode: keep the files' mode bits (read/write/execute),
            default True
        :param mkdirs: create missing intermediate directories automatically
        :return
        """
        try:
            self.conn.run(f"test -d {Path(remote_path).as_posix()}")
        except UnexpectedExit:
            raise RuntimeError(
                "remote_path 必须是一个已经存在的文件夹路径!请给定正确的 remote_path,或者使用默认参数!")

        stream = tar_files(local_dir_path, c_type="gz", get_stream=True)
        tar_name = local_dir_path.resolve().name + ".tar.gz"
        stream.name = tar_name
        self.put(local=stream,
                 remote_file_path=Path(remote_path).as_posix(),
                 preserve_mode=preserve_mode,
                 mkdirs=mkdirs)
        with self.cd(remote_path):
            self.run("tar -ax -f {}".format(tar_name))
            self.run("rm {}".format(tar_name))
예제 #26
0
class coverage:
    """ Test coverage management: pull kernel gcov data off a remote board
    over SSH and turn it into an lcov HTML report locally. """

    def __init__(self, address="192.168.86.33", username="******", password="******"):
        self.address = address
        self.conn = Connection(
            "{username}@{ip}".format(username=username, ip=address,),
            connect_kwargs={"password": password},
        )
        # Local directory the gcov tarball gets unpacked into; set by
        # collect_gcov_trackers().
        self.unpacked = None

    def _crun(self, cmd):
        # Run `cmd` on the remote board, echoing the command and its output.
        print(cmd)
        result = self.conn.run(cmd)
        print(result)
        print(result.stdout)

    def _lrun(self, cmd):
        # Run `cmd` on the local machine, echoing the command and its output.
        print(cmd)
        result = self.conn.local(cmd)
        print(result)
        print(result.stdout)

    def collect_gcov_trackers(self):
        """ Collect gcov traces from remote board """
        tmp_folder = "".join(random.choice(string.ascii_lowercase) for i in range(16))
        tmp_folder = "/tmp/" + tmp_folder
        GCDA = "/sys/kernel/debug/gcov"
        # Raw strings keep the shell-escaped \{\} \; bytes identical while
        # fixing Python's invalid-escape-sequence SyntaxWarning.
        cmd = "find " + GCDA + r" -type d -exec mkdir -p " + tmp_folder + r"/\{\} \;"
        self._crun(cmd)
        # .gcda pseudo-files must be read via cat (not cp) to get real data.
        cmd = (
            "find "
            + GCDA
            + " -name '*.gcda' -exec sh -c 'cat < $0 > '"
            + tmp_folder
            + r"'/$0' {} \;"
        )
        self._crun(cmd)
        cmd = (
            "find "
            + GCDA
            + " -name '*.gcno' -exec sh -c 'cp -d $0 '"
            + tmp_folder
            + r"'/$0' {} \;"
        )
        self._crun(cmd)
        dest = (
            "".join(random.choice(string.ascii_lowercase) for i in range(16))
            + ".tar.gz"
        )
        cmd = "tar czf " + dest + " -C " + tmp_folder + " sys"
        self._crun(cmd)
        self.conn.get(dest)
        # Unpack
        self.unpacked = os.getcwd() + "/out"
        self._lrun("mkdir " + self.unpacked)
        self._lrun("tar xvf " + dest + " -C " + self.unpacked + "/")
        self._lrun("rm " + dest)

    def gen_lcov_html_report(self, linux_build_dir):
        """ Generate lcov report from linux build dir and gcov traces """
        report = os.getcwd() + "/report"
        cmd = "lcov -b " + linux_build_dir + " -c -d " + self.unpacked + " > " + report
        self._lrun(cmd)
        # Bug fix: the original `html = self.unpacked = ...` clobbered
        # self.unpacked with the HTML output dir, breaking any later use of
        # the unpacked traces (e.g. regenerating the report).
        html = os.getcwd() + "/html/"
        cmd = "genhtml -o " + html + " " + report
        self._lrun(cmd)
        print("Generated HTML is located here", html)
예제 #27
0
def get_all_logs(key, host_addr, group):
    """Fetch log archives from every student host into a local backup dir.

    For each host: tars /var/log/script, copies the root shell history and
    tars /git on the remote side, then downloads all three under
    /root/log/<group>/<id>/<timestamp>/ locally.

    :param key: path to the SSH private key file used for authentication
    :param host_addr: DataFrame with 'student_id' and 'ip_address' columns
    :param group: group number (string); used for the id offset and the
                  backup path
    """
    str_date = datetime.now().strftime('%s')  # epoch seconds, shared by all hosts
    for i, row in host_addr.iterrows():
        id = row['student_id']
        # Offset ids so every group's students get a globally unique id.
        id = id + ((int)(group) * len(host_addr))
        id = (str)(id)
        h = row['ip_address']

        try:
            print("--------------- Start get_all_logs function " +
                  id.zfill(3) + " ---------------")
            c = Connection(host=h,
                           user="******",
                           port=22,
                           connect_timeout=5,
                           connect_kwargs={"key_filename": key})
            print("Connected host: " + h)

            logger_dir = "/home/logger/log"
            backup_dir = "/root/log/" + group + "/" + id.zfill(
                3) + "/" + str_date
            c.local("mkdir -p " + backup_dir, warn=True)
            print("Created backup_dir locally: " + backup_dir)

            c.run("sudo mkdir -p " + logger_dir, warn=True)
            print("Create backup_dir on remote: " + logger_dir)

            c.run("sudo tar czf " + logger_dir +
                  "/script.tar.gz -C /var/log/ script",
                  warn=True)
            print("Created script.tar.gz on remote")

            # Consistency fix: use logger_dir instead of repeating the
            # literal "/home/logger/log" path (same resulting value).
            c.get(logger_dir + "/script.tar.gz",
                  backup_dir + "/script.tar.gz")
            print("Get script.tar.gz on remote")

            c.run("sudo rm -rf " + logger_dir + "/script.tar.gz", warn=True)
            print("Deleted script.tar.gz on remote")

            c.run("sudo cp /root/.command_history " + logger_dir, warn=True)
            print(
                "Copy /root/.command_history to /home/logger/log/.command_history on remote"
            )

            c.get(logger_dir + "/.command_history",
                  backup_dir + "/.command_history")
            print("Get .command_history on remote")

            c.run("sudo tar czf " + logger_dir + "/git.tar.gz" + " -C / git",
                  warn=True)
            print("Created git.tar.gz on remote")

            c.get(logger_dir + "/git.tar.gz", backup_dir + "/git.tar.gz")
            print("Get git.tar.gz on remote")

            c.run("sudo rm -rf " + logger_dir + "/git.tar.gz", warn=True)
            print("Deleted git.tar.gz on remote")

            print(
                "--------------- Finish get_all_logs function ---------------")
            print()
        except socket.timeout:
            # Unreachable host: skip it and keep collecting from the rest.
            continue
"""tests for the 'fabric' package (v2.x)

Loosely inspired by http://docs.fabfile.org/en/2.5/getting-started.html
"""

from fabric import connection, Connection, group, SerialGroup, ThreadingGroup, tasks, task

# NOTE(review): the trailing `# $getCommand=...` comments appear to be
# machine-read expectations for a static-analysis query test — do not edit
# or remove them.

################################################################################
# Connection
################################################################################
c = Connection("web1")
c.run("cmd1; cmd2")  # $getCommand="cmd1; cmd2"
c.local("cmd1; cmd2")  # $getCommand="cmd1; cmd2"
c.sudo("cmd1; cmd2")  # $getCommand="cmd1; cmd2"

c.local(command="cmd1; cmd2")  # $getCommand="cmd1; cmd2"
c.run(command="cmd1; cmd2")  # $getCommand="cmd1; cmd2"
c.sudo(command="cmd1; cmd2")  # $getCommand="cmd1; cmd2"

# fully qualified usage
c2 = connection.Connection("web2")
c2.run("cmd1; cmd2")  # $getCommand="cmd1; cmd2"


################################################################################
# SerialGroup
################################################################################
results = SerialGroup("web1", "web2", "mac1").run("cmd1; cmd2")  # $getCommand="cmd1; cmd2"

pool = SerialGroup("web1", "web2", "web3")
예제 #29
0
class Remote(object):
    """Remote file/command operations over a fabric Connection, optionally
    tunnelled through an SSH gateway host.

    The Result of the most recent operation is kept in ``self.result``.
    """

    def __init__(self, env):
        # Fix: identity comparison with None (PEP 8) instead of `!= None`.
        if env.gateway is not None:
            self.gatewayConn = Connection(
                env.gateway,
                user=env.user,
                connect_kwargs={"password": env.password})
            # NOTE(review): this branch targets env.address while the
            # non-gateway branch uses env.host — confirm both attributes
            # exist on the env object and this asymmetry is intended.
            self.conn = Connection(env.address,
                                   user=env.user,
                                   gateway=self.gatewayConn,
                                   connect_kwargs={"password": env.password})
        else:
            self.conn = Connection(env.host,
                                   user=env.user,
                                   connect_kwargs={"password": env.password})
        self.result = ""

    def __del__(self):
        pass

    def getResult(self):
        """Return the Result object of the most recent operation."""
        return self.result

    def mkdir(self, dir_remote):
        # Create the remote directory (mode 755, parents included) if it
        # does not already exist.
        if not self.checkpath(dir_remote):
            print("ready to mkdir: %s ..." % dir_remote)
            if self.conn.run("mkdir --mode=755 -p %s" % dir_remote).failed:
                return False

        return True

    def checkpath(self, filename):
        # True if the given remote file or directory exists.
        return fab_exists(self.conn, filename) == True

    def rmfile(self, filename):
        # Delete a remote file or directory; refuse to touch the root dir.
        if filename == "/":
            return False

        if self.checkpath(filename):
            if self.conn.sudo("rm -rf %s" % filename).failed:
                return False
        return True

    def cleanup(self, dir_remote):
        # Empty a remote directory (refusing "" and "/"); the directory
        # itself must already exist.
        if (dir_remote == "" or dir_remote == "/"
                or not self.checkpath(dir_remote)):
            return False

        if self.conn.sudo("rm -rf %s" % (dir_remote + "/*")).failed:
            return False

        return True

    def cleanupLocal(self, dir_local, sudo=False):
        # Empty a local directory (refusing "" and "/").
        # NOTE(review): existence is checked via checkpath(), which tests
        # the REMOTE side — confirm this is intended for a local cleanup.
        if (dir_local == "" or dir_local == "/"
                or not self.checkpath(dir_local)):
            return False

        cmdLocal = "rm -rf %s" % (dir_local + "/*")

        if sudo:
            cmdLocal = "sudo -s " + cmdLocal

        if self.conn.local(cmdLocal).failed:
            return False

        return True

    def upload(self, dir_local, dir_remote, force=False):
        # Upload a local file into dir_remote, creating the directory if
        # needed; skip if the remote file already exists unless force=True.
        print("ready to upload file: %s" % dir_local)

        result_check = self.mkdir(dir_remote)
        if result_check == False:
            print("failed to mkdir: %s" % dir_remote)

        file_name = os.path.basename(dir_local)
        file_remote = dir_remote + file_name

        if self.checkpath(file_remote) == True and force == False:
            print("remote file exists, skip upload ...")
            return True

        try:
            self.conn.put(dir_local, dir_remote)
        except Exception:
            print("Exception: exception occurred when upload file ...")
            # Fix: print_exc() writes the traceback itself; wrapping it in
            # print() just printed a stray "None".
            traceback.print_exc()
            return False

        print("end to upload.")
        return True

    def download(self, dir_remote, dir_local):
        # Download a remote file into dir_local (created if missing); skip
        # if the local file already exists.
        print("ready to download file: %s " % dir_remote)

        if not exists(dir_local):
            print("ready to mkdir: %s ..." % dir_local)
            if self.conn.local("mkdir --mode=755 -p %s" % dir_local).failed:
                print("Error: failed to mkdir on local machine, path: %s" %
                      dir_local)
                return False

        baseName = os.path.basename(dir_remote)
        fileLocal = dir_local + baseName
        if exists(fileLocal):
            print("local file exists, skip download ...")
            return True

        isOk = True
        # Bug fix: pre-bind f so the finally block cannot raise NameError
        # when open() itself fails.
        f = None

        try:
            f = open(fileLocal, 'wb')
            self.result = self.conn.get(dir_remote, f)
        except Exception:
            print("Exception: exception occurred when download file ...")
            print("File: " + dir_remote)
            traceback.print_exc()
            isOk = False
        finally:
            if f:
                print("ready to close file: " + fileLocal)
                f.close()

        print("end to download.")

        return isOk

    def sudo(self, cmd):
        # Run a command on the remote host with sudo privileges.
        print("start super run ...")
        try:
            self.result = self.conn.sudo("%s" % cmd)
            if self.result.failed:
                print("Error: failed to execute remote command ...")
                print("Command: " + cmd)
                return False
        except Exception:
            print(
                "Exception: exception occurred when executing remote command ..."
            )
            print("Command: " + cmd)
            traceback.print_exc()
            return False
        print("end super run.")
        return True

    def run(self, cmd):
        # Run a command on the remote host as the current user.
        print("start run ...")
        try:
            self.result = self.conn.run("%s" % cmd)
            if self.result.failed:
                print("Error: failed to execute remote command ...")
                print("Command: " + cmd)
                return False
        except Exception:
            print(
                "Exception: exception occurred when executing remote command ..."
            )
            print("Command: " + cmd)
            return False
        print("end run.")
        return True

    def local(self, cmd):
        # Run a command on the local machine.
        print("start run ...")
        self.result = self.conn.local("%s" % cmd)
        if self.result.failed:
            print("Error: failed to execute local command ...")
            return False
        print("end run.")
        return True
예제 #30
0
def mytask(con: Connection):
    """Demo task: echo locally, push /tmp to the server, echo remotely."""
    local_cmd = "echo 'do on local'"
    remote_cmd = "echo 'do something'"
    con.local(local_cmd)
    put_dir(con, "/tmp", "tmp")
    con.run(remote_cmd)
예제 #31
0
def check_server_status(key, host_addr, group):
    """Probe each student host over SSH and collect per-step check results.

    :param key: path to the SSH private key file
    :param host_addr: DataFrame with 'student_id' and 'ip_address' columns
    :param group: group number (string-convertible); offsets student ids
    :return: DataFrame with one row per executed check command

    Fixes vs. original: the exception row labelled its second column
    "team" while the frame schema uses "id" (misaligned append); a bare
    `except:` swallowed even KeyboardInterrupt; an unreachable duplicate
    append sat after `continue`; and the deprecated DataFrame.append is
    replaced by pd.concat (removed in pandas 2.0).
    """
    df = pd.DataFrame(index=[],
                      columns=[
                          "unixtime", "id", "host", "group", "command",
                          "stdout", "stderr", "step"
                      ])
    for i, row in host_addr.iterrows():
        id = row['student_id']
        # Offset ids so every group's students get a globally unique id.
        id = id + ((int)(group) * len(host_addr))
        h = row['ip_address']
        print(key)
        print(id)
        print(h)

        try:
            print("--------------- Start check_server_status function " + (str)(id) + " ---------------")
            c = Connection(host=h, user="******", port=22, connect_timeout=2,
                           connect_kwargs={"key_filename": key})
            print("Connected host: " + h)

            backup_dir = settings.SYSTEM_LOG + "/" + group + "/status"
            c.local("mkdir -p " + backup_dir, warn=True)
            print("Created backup_dir locally: " + backup_dir)

            # NOTE: historically, one failing command stopped log
            # collection entirely for this host.
            curl = "curl http://" + h  # base for local reachability checks
            # One entry per check step: (remote?, command, step label).
            # Order matters and matches the original explicit sequence.
            checks = [
                (True, "sudo yum list installed | grep httpd", "step1-1"),
                (True, "sudo systemctl status httpd", "step1-2"),
                (True, "sudo systemctl status firewalld", "step1-3"),
                (False, curl + ":80 --connect-timeout 5 -sS", "step1-4"),
                (True, "sudo find /var/www/html/hello.txt", "step1-5-1"),
                (True, "sudo find /var/www/html/hellow.txt", "step1-5-2"),
                (False, curl + ":80/hello.txt --connect-timeout 5 -sS", "step1-6-1"),
                (False, curl + ":80/hellow.txt --connect-timeout 5 -sS", "step1-6-2"),
                (False, curl + ":80 --connect-timeout 5 -sS", "step1-7"),
                (True, "sudo tail -n 3 /var/log/httpd/access_log", "step2"),
                (True, "sudo find /var/www/html/ensyu.txt", "step3-1"),
                (False, curl + ":80/ensyu.txt --connect-timeout 5 -sS", "step3-2"),
                # step4-1 was already commented out in the original.
                (True, "sudo find /var/www/rootdirectory/ensyu.txt", "step4-2"),
                (False, curl + ":80/ensyu.txt --connect-timeout 5 -sS", "step4-3"),
                (True, "sudo cat /etc/httpd/conf/httpd.conf | grep Listen", "step5-1"),
                (False, curl + ":443 --connect-timeout 5 -sS", "step5-2"),
                (True, "sudo cat /etc/httpd/conf/httpd.conf | grep DirectoryIndex", "step6-1"),
                (False, curl + ":443 --connect-timeout 5 -sS", "step6-2"),
                (True, "sudo systemctl status firewalld", "step7-1"),
                (False, curl + ":443 --connect-timeout 5 -sS", "step7-2"),
                (True, "sudo yum list installed | grep php", "step8-1"),
                (True, "sudo systemctl status httpd", "step8-2"),
                (True, "sudo find /var/www/rootdirectory/ensyu.txt", "step8-3"),
                (False, curl + ":443/phptest.php --connect-timeout 5 -sS", "step8-4"),
            ]
            for remote, cmd, step in checks:
                runner = run_server_cmd if remote else run_local_cmd
                df = runner(id, df, h, group, c, cmd, step)
                print(step)

            print("--------------- Finish check_server_status function ---------------")
            print()
        except Exception:
            # Record an 'exception' row for this host and move on.
            print("{}: {}: {}".format(datetime.now().strftime('%s'), h, "socket.timeout"))
            df_temp = pd.DataFrame([[datetime.now().strftime('%s'), id, h, "exception", "", "", "socket.timeout", "exception"]])
            df_temp.columns = ["unixtime", "id", "host", "group", "command", "stdout", "stderr", "step"]
            df = pd.concat([df, df_temp], sort=False)
            continue
    return df
예제 #32
0
def deploy(_, env="dev"):
    """Package the current project and deploy it to the host configured for *env*.

    Pipeline: build an sdist locally, upload it to the remote work dir,
    ensure a virtualenv exists there, install the package into it, upload
    the config files, then start (or restart) the service and verify it
    came up healthy.

    :param _: unused positional context argument (invoke/fabric task convention).
    :param env: name of the environment whose config to load; defaults to "dev".
    """
    config = load_config(env)
    work_dir = config["work_dir"]
    package_file_name = config["package_file_name"]
    system_python = config["system_python"]
    config_file_name = config["config_file_name"]

    connection = Connection(config["host"])
    # Echo every command as it runs so the deploy is traceable in the console.
    connection.config.run.echo = True

    # Build the source distribution locally.
    connection.local("python setup.py sdist")
    logging.info("【代码打包】OK")

    # Upload the package to the remote work directory.
    create_dir(connection, work_dir)
    upload(connection, "dist", package_file_name, work_dir, package_file_name)

    # Create the virtualenv only if it does not exist yet (warn=True makes
    # a failing `test -f` return a result instead of raising).
    test_venv_cmd = "test -f {work_dir}/venv/bin/activate".format(
        work_dir=work_dir)
    if connection.run(test_venv_cmd, warn=True).failed:
        create_venv = "virtualenv -p {system_python} {work_dir}/venv".format(
            work_dir=work_dir, system_python=system_python)
        connection.run(create_venv)
        logging.info("【虚拟环境创建】OK")

    # Install / upgrade the uploaded package inside the virtualenv.
    # NOTE(review): the `work_dir` kwarg below is unused by the format
    # string — harmless, but likely a leftover.
    install_requirements_cmd = "easy_install {package_file_name}".format(
        package_file_name=package_file_name, work_dir=work_dir)
    with connection.cd(work_dir):
        with connection.prefix(". venv/bin/activate"):
            connection.run(install_requirements_cmd)
    logging.info("【安装包更新】OK")

    # Upload the environment config and the uwsgi config.
    upload(connection, "configure", config_file_name, work_dir, "config.py")
    upload(connection, ".", "uwsgi.ini", work_dir, "uwsgi.ini")

    # Start the service if it is down, otherwise restart it.
    create_dir(connection, os.path.join(work_dir, "logs"))
    with connection.cd(work_dir):
        with connection.prefix(". venv/bin/activate"):
            check_service1_command = "autointerface check-service1"
            check_service2_command = "autointerface check-service2"
            start_service_command = "autointerface start"
            restart_service_command = "autointerface restart"
            if connection.run(check_service2_command, warn=True).failed:
                connection.run(start_service_command)
                action = "启动"
            else:
                connection.run(restart_service_command)
                action = "重启"

    # Confirm both health checks pass after the start/restart.
    with connection.cd(work_dir):
        with connection.prefix(". venv/bin/activate"):
            if connection.run(check_service1_command, warn=True).failed:
                logging.error("【%s】失败" % action)
            elif connection.run(check_service2_command, warn=True).failed:
                logging.error("【%s】失败" % action)
            else:
                logging.info("【%s】成功" % action)
예제 #33
0
파일: fabfile.py 프로젝트: anwen/anwen
def backup(c):
    """Backup data from the aw mongo host and download the archives locally.

    On the remote host, dumps the db to yaml and tars the yaml data, the
    markdown shares and the uploaded images; then downloads and unpacks
    each tarball into the matching local directory.

    :param c: invoke/fabric context (ignored — see note below).
    """
    # NOTE(review): the passed-in context is discarded and the task always
    # targets the 'aw' host — confirm this is intentional.
    c = Connection('aw')
    with c.cd('/var/www/anwen/db'):
        c.run('. ~/.zshrc && python3 db_in_out.py -o')
        c.run('tar czf aw_yaml.tar.gz data')
    with c.cd('/var/www/anwen/docs/shares'):
        c.run('tar czf aw_md.tar.gz *.md')
    with c.cd('/var/www/anwen/static/upload/'):
        c.run('tar czf upload.tar.gz img')
    print('download yaml:')
    with CD(os.path.join(os.getcwd(), 'db/')):
        c.get('/var/www/anwen/db/aw_yaml.tar.gz', 'aw_yaml.tar.gz')
        c.local('tar zxf aw_yaml.tar.gz')
        c.local('rm aw_yaml.tar.gz')
    print('download md:')
    with CD(os.path.join(os.getcwd(), 'docs/shares/')):
        c.get('/var/www/anwen/docs/shares/aw_md.tar.gz', 'aw_md.tar.gz')
        c.local('tar zxf aw_md.tar.gz')
        c.local('rm aw_md.tar.gz')
    print('download img:')
    # Bug fix: a stray `return` here previously made this whole block
    # unreachable, so 'download img:' was printed but nothing downloaded.
    with CD(os.path.join(os.getcwd(), 'static/upload/')):
        c.get('/var/www/anwen/static/upload/upload.tar.gz', 'upload.tar.gz')
        c.local('tar zxf upload.tar.gz img')
        c.local('rm upload.tar.gz')