def config_env():
    """Prepare every deploy host for Elasticsearch.

    Appends basic vim settings to ~/.vimrc, exports the ES heap
    environment variables into /home/app/.bash_profile, and raises
    vm.max_map_count to the value Elasticsearch requires.
    Relies on module-level `SerialGroup` and `deploy_hosts`.
    """
    group = SerialGroup(*deploy_hosts)
    # Sanity checks: confirm we are the expected user on each box.
    group.run("whoami")
    group.run("hostname")
    group.run(
        "echo 'set nu\nset incsearch\nset hlsearch\nset background=dark\n' >> ~/.vimrc "
    )
    group.run("echo 'export ES_JAVA_OPTS=\"-Xms28g -Xmx28g\"\n"
              "export ES_HEAP_SIZE=28G' >> /home/app/.bash_profile ")
    # ES refuses to start if vm.max_map_count is below 262144.
    group.run('sudo sysctl -w vm.max_map_count=262144')
def load_es_package():
    """Download and unpack the Elasticsearch 6.2.2 tarball on each host.

    Fetches the archive through the rec-httpproxy01 HTTP proxy into
    `file_load_path` (module-level) and extracts it in place.
    """
    group = SerialGroup(*deploy_hosts)
    # Make sure the staging directory exists before downloading into it.
    group.run('mkdir -p %s' % file_load_path)
    group.run(
        'cd %s && wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.2.2.tar.gz'
        ' -e use_proxy=yes -e http_proxy=rec-httpproxy01:3128' % file_load_path)
    group.run('cd %s && tar -zxvf elasticsearch-6.2.2.tar.gz' % file_load_path)
def retrieve_smis(server_list, out_filename=".smi_saves"):
    """Run `nvidia-smi` on every server and save the output to a file.

    :param server_list: iterable of host strings for a fabric SerialGroup
    :param out_filename: path of the file that captures the remote output
    :return: the fabric GroupResult on success, or the tuple
             ('Err', [hostnames that failed]) when any host errors out
    """
    server_group = SerialGroup(*server_list)
    # `with` guarantees the capture file is flushed and closed even when
    # the group run raises (the original leaked the open handle).
    with open(out_filename, "w", encoding="utf-8") as out_file:
        try:
            return server_group.run(
                'echo "new connection:" && hostname && nvidia-smi',
                out_stream=out_file, hide=True)
        except GroupException as e:
            problem_list = []
            for key, val in e.result.items():
                # A value that is not a Result means the connection itself
                # failed before the command could run.
                if not isinstance(val, Result) or val.failed:
                    problem_list.append(key.original_host)
            return 'Err', problem_list
from fabric import Connection, Config
from fabric import SerialGroup as Group

# Build a group of remote servers (two connections to the same host here,
# reached through port 2222).
group = Group('localhost', 'localhost', user='******', port=2222)

# Run the same command across every connection in the group.
group.run('whoami')
group.run('uname -a')

# A group run returns a GroupResult mapping connection -> result.
results = group.run('whoami')

# Print the per-host output of the last command.
for cxn, res in results.items():
    print("{0.host}: {1.stdout}".format(cxn, res))
c.local(command="cmd1; cmd2") # $getCommand="cmd1; cmd2" c.run(command="cmd1; cmd2") # $getCommand="cmd1; cmd2" c.sudo(command="cmd1; cmd2") # $getCommand="cmd1; cmd2" # fully qualified usage c2 = connection.Connection("web2") c2.run("cmd1; cmd2") # $getCommand="cmd1; cmd2" ################################################################################ # SerialGroup ################################################################################ results = SerialGroup("web1", "web2", "mac1").run("cmd1; cmd2") # $getCommand="cmd1; cmd2" pool = SerialGroup("web1", "web2", "web3") pool.run("cmd1; cmd2") # $getCommand="cmd1; cmd2" # fully qualified usage group.SerialGroup("web1", "web2", "mac1").run("cmd1; cmd2") # $getCommand="cmd1; cmd2" ################################################################################ # ThreadingGroup ################################################################################ results = ThreadingGroup("web1", "web2", "mac1").run("cmd1; cmd2") # $getCommand="cmd1; cmd2" pool = ThreadingGroup("web1", "web2", "web3") pool.run("cmd1; cmd2") # $getCommand="cmd1; cmd2" # fully qualified usage group.ThreadingGroup("web1", "web2", "mac1").run("cmd1; cmd2") # $getCommand="cmd1; cmd2"
"""tests for the 'fabric' package (v2.x) Most of these examples are taken from the fabric documentation: http://docs.fabfile.org/en/2.5/getting-started.html See fabric-LICENSE for its' license. """ from fabric import Connection c = Connection('web1') result = c.run('uname -s') c.run(command='echo run with kwargs') from fabric import SerialGroup as Group results = Group('web1', 'web2', 'mac1').run('uname -s') from fabric import SerialGroup as Group pool = Group('web1', 'web2', 'web3') pool.run('ls') # using the 'fab' command-line tool from fabric import task @task def upload_and_unpack(c): if c.run('test -f /opt/mydata/myfile', warn=True).failed: c.put('myfiles.tgz', '/opt/mydata') c.run('tar -C /opt/mydata -xzvf /opt/mydata/myfiles.tgz')
class FabricTasks:
    """Drive MPC protocol deployment and execution over fabric connections.

    Builds one Connection per public IP listed in
    InstancesConfigurations/public_ips; each connection is tagged with a
    `party_id` equal to its line index in that file.
    """

    def __init__(self):
        # `with` closes the ips file promptly (the original leaked the handle).
        with open('InstancesConfigurations/public_ips', 'r') as ips_file:
            host_list = ips_file.read().splitlines()
        self.connections = []
        self.pool = Serial()
        # party_id is simply the host's position in the public_ips file.
        for party_id, host in enumerate(host_list):
            c = Connection(host, user='******', connect_kwargs={
                'key_filename': ['%s/Keys/matrix.pem' % Path.home()]
            })
            c.party_id = party_id
            self.connections.append(c)
            self.pool.append(c)

    def pre_process(self, task_idx):
        """
        Execute pre process tasks on the remote hosts.
        :param task_idx: int
        :return: 0 on success, 1 on failure
        """
        for conn in self.pool:
            conn.put('Execution/pre_process.py', Path.home())
        try:
            self.pool.run('python3 pre_process.py %s' % task_idx)
        except BaseException as e:
            # BUG FIX: Python 3 exceptions have no `.message` attribute, so the
            # original `print(e.message)` raised AttributeError inside the
            # handler; printing the exception itself is safe.
            print(e)
            return 1
        return 0

    def install_git_project(self, username, password, git_branch,
                            working_directory, git_address, external):
        """
        Clone (or refresh) the project on every host and build it.
        :param username: string
        :param password: string
        :param git_branch: string
        :param working_directory: list[string]
        :param git_address: list[string]
        :param external: string — truthy when the protocol ships its own build.sh
        :return: None
        """
        # result = self.pool.run('ls' % working_directory, warn=True).failed
        # if result:
        self.pool.run('rm -rf %s' % working_directory)
        self.pool.run('git clone %s %s'
                      % (git_address.format(username, password), working_directory))
        self.pool.run('cd %s && git pull' % working_directory)
        self.pool.run('cd %s && git checkout %s ' % (working_directory, git_branch))
        if external:
            # BUG FIX: the original string contained a %s placeholder but no
            # format argument, so the literal text 'cd %s/MATRIX' reached the
            # remote shell.
            self.pool.run('cd %s/MATRIX && ./build.sh' % working_directory)
        else:
            # Wipe any stale CMake cache before reconfiguring.
            if self.pool.run('cd %s && ls CMakeLists.txt' % working_directory,
                             warn=True).succeeded:
                self.pool.run('cd %s && rm -rf CMakeFiles CMakeCache.txt Makefile'
                              % working_directory)
            self.pool.run('cd %s && cmake .' % working_directory)
            self.pool.run('cd %s && make' % working_directory)
        # Best-effort unpack of any bundled 7z archives.
        self.pool.run('cd %s && 7za -y x "*.7z"' % working_directory, warn=True)

    def run_protocol(self, cxn, config_file, args, executable_name,
                     working_directory, party_id):
        """
        Launch the protocol described by `config_file` on `cxn`.
        :param cxn: fabric Connection for this party
        :param config_file: path to the JSON protocol configuration
        :param args: list of extra command-line arguments
        :param executable_name: protocol binary/script name
        :param working_directory: remote directory holding the build
        :param party_id: this host's party index
        """
        with open(config_file, 'r') as data_file:
            data = json.load(data_file, object_pairs_hook=OrderedDict)
        external_protocol = json.loads(data['isExternal'].lower())
        protocol_name = data['protocol']
        # Collect the regions of whichever cloud providers are configured;
        # no regions at all means a purely local run.
        if 'aws' in data['CloudProviders']:
            regions = data['CloudProviders']['aws']['regions']
        elif 'azure' in data['CloudProviders']:
            regions = data['CloudProviders']['azure']['regions']
        elif len(data['CloudProviders']) > 1:
            regions = data['CloudProviders']['aws']['regions'] + \
                data['CloudProviders']['scaleway']['regions']
        else:
            regions = []
        args = ' '.join(args)
        # local execution
        if len(regions) == 0:
            number_of_parties = len(self.pool)
            for idx in range(number_of_parties):
                if external_protocol:
                    cxn.run('cd %s && ./%s %s %s &'
                            % (working_directory, executable_name, idx, args))
                else:
                    cxn.run('cd %s && ./%s partyID %s %s &'
                            % (working_directory, executable_name, idx, args))
        # remote execution
        else:
            # copy parties file to hosts
            if len(regions) > 1:
                cxn.put('InstancesConfigurations/parties%s.conf' % party_id,
                        working_directory)
                cxn.run('mv parties%s.conf parties.conf' % party_id)
            else:
                # NOTE(review): hard-coded local path — verify this is still
                # the intended source of parties.conf.
                cxn.put('/home/liork/MATRIX/InstancesConfigurations/parties.conf')
                cxn.run('mv parties.conf HyperMPC')
            # Kill any leftover instance of the executable before relaunching.
            cxn.run("kill -9 `ps aux | grep %s | awk '{print $2}'`"
                    % executable_name, warn=True)
            # libscapi protocol
            if not external_protocol:
                cxn.run('cd %s && chmod +x %s'
                        % (working_directory, executable_name))
                cxn.run('cd %s && ./%s partyID %s %s &'
                        % (working_directory, executable_name, party_id, args))
            # external protocols
            else:
                # with coordinator
                if 'coordinatorConfig' in data:
                    cxn.put('InstancesConfigurations/parties.conf',
                            '%s/MATRIX' % working_directory)
                    if protocol_name == 'SCALE-MAMBA':
                        cxn.put('InstancesConfigurations/public_ips',
                                '%s/MATRIX' % working_directory)
                        # BUG FIX: attribute was misspelled `connect_kwags`,
                        # which raised AttributeError at runtime.
                        cxn.put(cxn.connect_kwargs.key_filename,
                                '%s/MATRIX' % working_directory)
                    if party_id == 0:
                        coordinator_executable = data['coordinatorExecutable']
                        coordinator_args = ' '.join(data['coordinatorConfig'])
                        cxn.run('./%s %s'
                                % (coordinator_executable, coordinator_args))
                    else:
                        # run the parties that depend on the coordinator
                        self.pool.run('./%s %s %s'
                                      % (executable_name, party_id - 1, args))
                        with open('Execution/execution_log.log', 'a+') as log_file:
                            log_file.write('%s\n' % args)
                else:
                    # run external protocols with no coordinator
                    self.pool.run('. ./%s %s %s'
                                  % (executable_name, party_id, args))
                    with open('Execution/execution_log.log', 'a+') as log_file:
                        log_file.write('%s\n' % args)

    def run_protocol_profiler(self):
        # Placeholder — not yet implemented.
        pass

    def run_protocol_latency(self):
        # Placeholder — not yet implemented.
        pass

    def update_libscapi(self):
        # Placeholder — not yet implemented.
        pass

    def get_logs(self):
        # Placeholder — not yet implemented.
        pass

    def test_task(self, c, party_id):
        # Debug helper: echo the connection and party id.
        print(c, party_id)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Prompt for a sudo password and run `hostname` on the tracker hosts."""
import getpass

from fabric import SerialGroup

# NOTE(review): a credential-scrubbing pass fused the getpass call and the
# SerialGroup construction into one unparseable statement.  Reconstructed
# from the commented-out template below — confirm the host names and user
# against the original deployment configuration.
sudo_pass = getpass.getpass("input your password: ")
pool = SerialGroup('arch-od-tracker02.beta1.fn', 'arch-od-tracker03.beta1.fn',
                   user='******', connect_kwargs={"password": sudo_pass})
#pool = SerialGroup('arch-od-tracker02.beta1.fn', 'arch-od-tracker03.beta1.fn', user='******', connect_kwargs={"password": '******'} )
pool.run('hostname')