Example #1
def benchmark():
    with open(os.path.expanduser("~/.credentials.conf")) as fh:
        credentials = fh.readline().strip()
    tenant_id, auth_token = credentials.split(";")
    region = env.host_string[-3:]
    # remove the previous benchmarks, if any
    run("rm -rf /root/.tsung/log/*")
    run("REGION=%s TENANT_ID=%s AUTH_TOKEN=%s bash /root/csi-marconi/benchmark.sh" %
        (region, tenant_id, auth_token))
    # get the benchmarks directory name
    output = run("ls /root/.tsung/log/")
    benchmark = output.stdout
    webpages_dir = "/usr/share/nginx/html/" + region + "/" + benchmark + "/"
    local_benchmarks_dir = "/root/logs/" + region + "/" + benchmark
    remote_benchmarks_dir = "/root/.tsung/log/" + benchmark
    # copy benchmark logs to local logs dir
    get(remote_benchmarks_dir, "/root/logs/" + region)
    # copy the benchmark logs to local temporary directory
    # local("cp -R " + "/root/logs/" + region + "/" + benchmark, "/root/logs/" + region + "/tmp")
    # create a data directory for csv files
    local("mkdir -p " + local_benchmarks_dir + "/csv_data")
    # generate reports
    local("cd " + local_benchmarks_dir +
          " && perl ~/csi-marconi/tsung_stats_ng.pl -t ~/csi-marconi/templates")
    local("mkdir -p " + webpages_dir)
    # copy the reports to website directory: /usr/share/nginx/html
    local("cp    " + local_benchmarks_dir + "/report.html    " + webpages_dir)
    local("cp    " + local_benchmarks_dir + "/graph.html     " + webpages_dir)
    local("cp -R " + local_benchmarks_dir + "/images         " + webpages_dir)
    local("cp    " + local_benchmarks_dir + "/urlerrors.html " + webpages_dir)
    local("cp -R " + local_benchmarks_dir + "/datas.html     " + webpages_dir)
    local("cp -R " + local_benchmarks_dir + "/data           " + webpages_dir)
    local("cp    " + local_benchmarks_dir + "/tsung.xml      " + webpages_dir)
    local("cd    " + webpages_dir + " && ln -s ../../static ./static")
Example #2
def version_state(name, prefix=False, no_content=False):
    """
    If the server state exists return parsed json as a python object or True 
    prefix=True returns True if any files exist with ls [prefix]*
    """
    if env.project_fullname:
        full_name = "-".join([env.project_fullname, name])
    else:
        full_name = name
    current_state = False
    state = State(full_name)
    state_path = "/var/local/woven/%s" % full_name
    if not prefix and not no_content and exists(state_path):
        content = int(sudo("ls -s %s" % state_path).split()[0])  # get size
        if content:
            fd, file_path = tempfile.mkstemp()
            os.close(fd)
            get(state_path, file_path)
            with open(file_path, "r") as f:
                content = f.read()
                object = json.loads(content)
                current_state = object
        else:
            current_state = True
    elif not prefix and no_content and exists(state_path):
        current_state = True
    elif prefix:
        with settings(warn_only=True):  # find any version
            current_state = sudo("ls /var/local/woven/*%s" % name)
        if not current_state.failed:
            current_state = True

    return current_state
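For illustration, a minimal usage sketch (assuming the Fabric env and woven state layout this function relies on; the state names are made up):

    # True if any state file matches /var/local/woven/*deployed*
    if version_state('deployed', prefix=True):
        print('some version has been deployed')

    # parsed JSON content of the exact state file, True if it is empty,
    # False if it does not exist
    state = version_state('deployed')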
Example #3
    def setup_password_less_ssh_across_hosts(self):
        # Setup /etc/hosts on all
        res = execute(self.set_etc_hosts, self)

        # Add to known_hosts for each node (Ex: master adds slave1 & slave2)
        for i in xrange(self.args.nnodes):
            self.set_host(INSTANCE_DICT['all']['publicIps'][i])
            res = execute(self.set_known_hosts, self, INSTANCE_DICT['all']['hostnames'][i])

        # Create Key in master
        self.set_hosts(HOSTS_MASTER)
        res = execute(self.create_keys, self)

        # Get Private/Public Key from master
        fd = StringIO()
        env.host_string = INSTANCE_DICT['master']['publicIps'][0]
        get(remote_path='~/.ssh/id_rsa', local_path=".")
        get(remote_path='~/.ssh/id_rsa.pub', local_path=fd)
        print(fd.getvalue())

        # Copy the private key downloaded from master (to local) to the slaves
        for i in xrange(self.args.nnodes - 1):
            cmd = "scp -i {0} -o StrictHostKeyChecking=no ./id_rsa root@{1}:~/.ssh/".format(self.args.identity_file,
                                                                INSTANCE_DICT['slaves']['publicIps'][i])
            local(cmd)

        # Set authorized keys in all
        self.set_hosts(HOSTS_ALL)
        res = execute(self.set_authorized_keys, self, fd.getvalue())
Example #4
def download_remote_file_to_tempfile(remoteFileName):
  """Downloads a file from a server to a \
  `tempfile.NamedTemporaryFile \
  <https://docs.python.org/2/library/tempfile.html#tempfile.NamedTemporaryFile>`_.

  **NOTE:** This function calls the `close` method on the NamedTemporaryFile.

  **NOTE:** The caller is responsible for deleting the NamedTemporaryFile.

  Args:
    remoteFileName(str): name of the file on the server

  Returns:
    str: name of the temporary file whose contents are the same as the file on
      the server

  >>> downloadedFileName = download_remote_file_to_tempfile(
        "/home/ubuntu/a/search.rb"
      )
  >>> with open(downloadedFileName, "r") as f:
        # do some processing here...
  >>> os.unlink(downloadedFileName) # delete the file
  """
  downloadedDotfile = tempfile.NamedTemporaryFile(delete=False)
  downloadedDotfileName = downloadedDotfile.name
  downloadedDotfile.close()
  # hide warning of an existing file getting overwritten
  with settings(hide("warnings")):
    get(remoteFileName, downloadedDotfileName)
  return downloadedDotfileName
Example #5
def add_sshfs_mount(*args):
    "install a list of sshfs mountpoints"
    FSTAB_PATTERN = "sshfs#{host}:{remotepath}\t{mountpoint}\tfuse\tdefaults,allow_other,exec,reconnect,transform_symlinks\t0 0"
    for mount in args:
        host = mount['host']
        remotepath = mount['remotepath']
        mountpoint = mount['mountpoint']
        excludes = mount['excludes']
        if env.host in excludes:
            print '%s is excluded from mountpoint.' % (env.host,)
            continue
    
        add_mount_point = True
        tmp_path = '/tmp/fstab.tmp'
        get("/etc/fstab", tmp_path)
        fstab_entry = FSTAB_PATTERN.format(host=host,
                                           remotepath=remotepath,
                                           mountpoint=mountpoint,)
    
        with open(tmp_path, 'r') as file:
            for line in file.readlines():
                if mountpoint in line:
                    add_mount_point = False
        if add_mount_point:
            with open(tmp_path, 'a') as file:
                file.write(fstab_entry + "\n\n")
            put(tmp_path, "/etc/fstab")
        with settings(warn_only=True):
            run('mkdir ' + mountpoint)
            run('umount ' + mountpoint)
            run('mount ' + mountpoint)
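A sketch of the mount description each positional argument is expected to be (the host and paths here are hypothetical):

    add_sshfs_mount({
        'host': 'fileserver01',
        'remotepath': '/srv/share',
        'mountpoint': '/mnt/share',
        'excludes': ['fileserver01'],  # hosts that should not get this mount
    })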
Example #6
    def get_screen_logs(self, path, SCREEN_LOGDIR='/opt/stack/screen-logs'):
        with settings(host_string=self.host_string, warn_only=True):
            p = '/tmp/logs'
            run('mkdir -p {p}'.format(p=p))
            run('find {sl} -type l -exec cp "{{}}" {p} '
                '\;'.format(sl=SCREEN_LOGDIR, p=p))
            get(p, path)
Example #7
def dump_db(dumpfile="pootle_DB_backup.sql"):
    """Dumps the DB as a SQL script and downloads it"""
    require('environment', provided_by=[production, staging])

    if ((isfile(dumpfile) and confirm('\n%s already exists locally. Do you '
        'want to overwrite it?' % dumpfile, default=False))
        or not isfile(dumpfile)):

        remote_filename = '%s/%s' % (env['project_path'], dumpfile)

        if ((exists(remote_filename) and confirm('\n%s already exists. Do you '
            'want to overwrite it?' % remote_filename, default=False))
            or not exists(remote_filename)):

            print('\nDumping DB...')

            with settings(hide('stderr')):
                sudo('mysqldump -u %s -p %s > %s' % (env['db_user'],
                                                     env['db_name'],
                                                     remote_filename))
                get(remote_filename, '.')
        else:
            print('\nAborting.')
    else:
        print('\nAborting.')
Example #8
    def _create_keypair(self, key_name):
        output_lines = self.ec2_base._shell_with_ec2_env(
            'euca-describe-keypairs', True).split('\n')
        for line in output_lines:
            entries = [k for k in line.split('\t')]
            if entries:
                if key_name in entries[0]:
                    return
        username = self.inputs.host_data[self.cfgm_ip]['username']
        password = self.inputs.host_data[self.cfgm_ip]['password']
        with hide('everything'):
            with settings(
                    host_string='%s@%s' % (username, self.cfgm_ip),
                    password=password, warn_only=True, abort_on_prompts=True):
                rsa_pub_arg = '.ssh/id_rsa'
                self.logger.debug('Creating keypair')
                if exists('.ssh/id_rsa.pub'):  # If the file exists on the remote machine
                    self.logger.debug('Public key exists. Getting public key')
                else:
                    self.logger.debug('Making .ssh dir')
                    run('mkdir -p .ssh')
                    self.logger.debug('Removing id_rsa*')
                    run('rm -f .ssh/id_rsa*')
                    self.logger.debug('Creating key using: ssh-keygen -f -t rsa -N')
                    run('ssh-keygen -f %s -t rsa -N \'\'' % (rsa_pub_arg))
                    self.logger.debug('Getting the created keypair')
                get('.ssh/id_rsa.pub', '/tmp/')
                openstack_host = self.inputs.host_data[self.inputs.openstack_ip]
                copy_file_to_server(openstack_host, '/tmp/id_rsa.pub', '/tmp',
                                    'id_rsa.pub')
                self.ec2_base._shell_with_ec2_env(
                    'euca-import-keypair -f /tmp/id_rsa.pub %s' % (self.key), True)
Example #9
def backup():
    "retrieve an SQL dump of webpad/jsbin data"

    pw = secrets.load_secrets(env['host'])['jsbin_pw']
    run('mysqldump -u jsbin -p%s jsbin > jsbin.sql' % pw)
    get('jsbin.sql', 'jsbin.%(host)s.dump.sql')
    run('rm jsbin.sql')
Example #10
def dumpjson(remote_base_path, local_base_path, fixtures):
    """Refreshes out fixtures from our DB and copy them to the local machine.

    ``remote_base_path`` is the base Django path on the remote (the folder containing the
    manage.py file). ``local_base_path`` is the same thing, but locally.

    The ``fixtures`` argument is a list of fixtures to dump. The format of each item of the list
    is ``(appname, models_to_dump, dest_fixture)``. Example:

    [
        ('myapp', ['Poll', 'Question'], 'initial_data'),
        ('otherapp', ['SomeModel'], 'initial_data'),
        ('flatpages_i18n', [], 'myapp/fixtures/flatpages.json'),
    ]

    ``dest_fixture`` can be either a json name (which will be expanded into a full path) or a full
    path, which will be left untouched.
    """

    with cd(remote_base_path):
        for app, models, dest in fixtures:
            if models:
                models_str = ' '.join('%s.%s' % (app, model) for model in models)
            else:
                models_str = app
            if '/' not in dest:
                # short dest, expand
                dest = '%s/fixtures/%s.json' % (app, dest)
            cmd = '../env/bin/python manage.py dumpdata %s | python -m json.tool > /tmp/fixture.json' % models_str
            run(cmd)
            ldest = op.join(local_base_path, dest)
            if op.exists(ldest):
                local('rm %s' % ldest)
            get('/tmp/fixture.json', ldest)
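An illustrative call matching the docstring's fixture format (the paths and app names here are hypothetical):

    dumpjson(
        '/srv/project/src',       # remote folder holding manage.py
        '/home/dev/project/src',  # the same folder locally
        [
            ('myapp', ['Poll', 'Question'], 'initial_data'),
            ('flatpages_i18n', [], 'myapp/fixtures/flatpages.json'),
        ],
    )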
Example #11
def get_remote_dump(filename=None, local_filename=None, rsync=True):
    """ do a remote database dump and copy it to the local filesystem """
    # future enhancement, do a mysqldump --skip-extended-insert (one insert
    # per line) and then do rsync rather than get() - less data transferred on
    # however rsync might need ssh keys etc
    require('user', 'host', 'port', provided_by=env.valid_envs)
    delete_after = False
    if filename is None:
        filename = '/tmp/db_dump.sql'
    if local_filename is None:
        # set a default, but ensure we can write to it
        local_filename = './db_dump.sql'
        if not _local_is_file_writable(local_filename):
            # if we have to use /tmp, delete the file afterwards
            local_filename = '/tmp/db_dump.sql'
            delete_after = True
    else:
        # if the filename is specified, then don't change the name
        if not _local_is_file_writable(local_filename):
            raise Exception(
                'Cannot write to local dump file you specified: %s' % local_filename)
    if rsync:
        _tasks('dump_db:' + filename)
        local("rsync -vz -e 'ssh -p %s' %s@%s:%s %s" % (
            env.port, env.user, env.host, filename, local_filename))
    else:
        _tasks('dump_db:' + filename)
        get(filename, local_path=local_filename)
    sudo_or_run('rm ' + filename)
    return local_filename, delete_after
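Two illustrative invocations (the filenames shown are the defaults the function falls back to):

    # dump remotely, then rsync the file down
    local_file, delete_after = get_remote_dump()

    # explicit local target, transferred with fabric's get() instead of rsync
    get_remote_dump(local_filename='./db_dump.sql', rsync=False)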
Example #12
    def __patch_jenkins_config( self ):
        """
        A context manager that retrieves the Jenkins configuration XML, deserializes it into an
        XML ElementTree, yields the XML tree, then serializes the tree and saves it back to
        Jenkins.
        """
        config_file = StringIO( )
        if run( 'test -f ~/config.xml', quiet=True ).succeeded:
            fresh_instance = False
            get( remote_path='~/config.xml', local_path=config_file )
        else:
            # Get the in-memory config as the on-disk one may be absent on a fresh instance.
            # Luckily, a fresh instance won't have any configured security.
            fresh_instance = True
            config_url = 'http://localhost:8080/computer/(master)/config.xml'
            with hide( 'output' ):
                config_file.write( run( 'curl "%s"' % config_url ) )
        config_file.seek( 0 )
        config = ElementTree.parse( config_file )

        yield config

        config_file.truncate( 0 )
        config.write( config_file, encoding='utf-8', xml_declaration=True )
        if fresh_instance:
            self.__service_jenkins( 'stop' )
        try:
            put( local_path=config_file, remote_path='~/config.xml' )
        finally:
            if fresh_instance:
                self.__service_jenkins( 'start' )
            else:
                log.warn( 'Visit the Jenkins web UI and click Manage Jenkins - Reload '
                          'Configuration from Disk' )
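Because the method yields part-way through, it is presumably decorated with contextlib.contextmanager somewhere outside this snippet; under that assumption the intended usage looks like:

    with self.__patch_jenkins_config() as config:
        root = config.getroot()
        # mutate the ElementTree here; it is serialized and re-uploaded on exit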
Example #13
def dump_db(dumpfile="pathagarh_DB_backup.sql"):
    """Dumps the DB as a SQL script and downloads it"""
    require('environment', provided_by=[production, staging])

    if isdir(dumpfile):
        print("dumpfile '%s' is a directory! Aborting." % dumpfile)

    elif (not isfile(dumpfile) or
          confirm('\n%s already exists locally. Do you want to overwrite it?'
                  % dumpfile, default=False)):

        remote_filename = '%s/%s' % (env['project_path'], dumpfile)

        if (not exists(remote_filename) or
            confirm('\n%s already exists. Do you want to overwrite it?'
                    % remote_filename, default=False)):

            print('\nDumping DB...')

            with settings(hide('stderr')):
                run('mysqldump -u %s %s %s > %s' %
                    (env['db_user'], env['db_password_opt'],
                     env['db_name'], remote_filename))
                get(remote_filename, '.')
                run('rm %s' % (remote_filename))
        else:
            print('\nAborting.')
    else:
        print('\nAborting.')
Example #14
    def update_postfix_master_cf(self):
        master_cf_orig = StringIO()
        get('/etc/postfix/master.cf', master_cf_orig)
        master_cf_orig.seek(0)
        lines = [l.rstrip() for l in master_cf_orig.readlines()]
        for n, l in enumerate(lines):
            if l.startswith('{name}_mailer'.format(name=self.settings.name)):
                found = True
                break
        else:
            found = False

        if found:
            lines = lines[0:n] + lines[n+3:]
        lines.extend([
                '{name}_mailer  unix  -       n       n       -       -       pipe'.format(name=self.settings.name),
                '  flags=FR user={user} argv={instance_code_dir}/bin/mailpost http://{host_name}/got_mail {upload_dir}'.format(
                    user=self.user,
                    instance_code_dir=self.settings.instance_code_dir,
                    host_name=self.settings.host_name,
                    upload_dir=self.settings.upload_dir),
                '  ${nexthop} ${user}',
            ])
        master_cf_new = StringIO('\n'.join(lines) + '\n')
        put(master_cf_new, '/etc/postfix/master.cf', mode=0o644)
Example #15
File: sync.py Project: nifei/test
def update_from_remote(host,shell):
    with settings(host_string='test@%s'%host,shell=shell):
        get('ServerTest.sh','ServerTest.sh')
        get('ClientTest.sh','ClientTest.sh')
        get('EndServerTest.sh','EndServerTest.sh')
        get('TCP/RunServer.sh','TCP/RunServer.sh')
        get('TCP/RunClient.sh','TCP/RunClient.sh')
Example #16
def backup():
    from datetime import datetime
    for name in ['agenda.db', 'database.log']:
        filename = datetime.now().strftime('%Y-%m-%d-%H-%M') + '-' + name
        local_path = os.path.join(LOCAL_REPO, 'backup', filename)
        get('%(var)s/%(name)s' % dict(paths, name=name), local_path)
        local("gzip '%s'" % local_path)
Example #17
def backup_db():
    now = datetime.datetime.now()
    filename = now.strftime("ocd-%Y-%m-%d-%H-%M.sql.gz")
    fullpath = posixpath.join(env.backup_dir, filename)
    run('sudo -u postgres pg_dump {} | gzip > {}'.format(
        env.ocuser, fullpath))
    operations.get(fullpath)
Example #18
def mac_numpy_release():
    with cd(mac_tmp + "/numpy"):
        run("paver sdist")
        run("paver dmg -p 2.5")
        run("paver dmg -p 2.6")
        run("paver dmg -p 2.7")
        get("release/installers/*.dmg", "release/")
Example #19
def _get_environment(name):
    """
    Get user custom environment variables
    """
    remote_path = '/etc/supervisor/conf.d/%s.conf' % name
    local_path = tempfile.mktemp(".conf")
    get(remote_path, local_path)

    env = {}
    section = 'program:%s' % name
    if local_path:
        parser = ConfigParser()
        parser.read(local_path)
        if parser.has_option(section, 'environment'):
            environ = parser.get(section, 'environment')
            for entry in environ.split(','):
                key, val = entry.split('=')

                # remove start and trailing quotes
                if val[0] in ['"', "'"] and val[0] == val[-1]:
                    val = val.strip(val[0])

                env[key] = val

    return env
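For reference, a sketch of the supervisor stanza this function parses (the program name and variables are made up):

    [program:myapp]
    environment=DJANGO_SETTINGS_MODULE='settings.prod',DEBUG='0'

    # _get_environment('myapp') would then return:
    # {'DJANGO_SETTINGS_MODULE': 'settings.prod', 'DEBUG': '0'}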
Example #20
    def _create_keypair(self, key_name):
        if key_name in [str(key.id) for key in self.obj.keypairs.list()]:
            return
        username = self.inputs.host_data[self.cfgm_ip]['username']
        password = self.inputs.host_data[self.cfgm_ip]['password']
        #with hide('everything'):
        if True:
            with settings(
                host_string='%s@%s' % (username, self.cfgm_ip),
#                    password=password, warn_only=True, abort_on_prompts=False):
                    password=password, warn_only=True, abort_on_prompts=True):
                rsa_pub_arg = '.ssh/id_rsa'
                self.logger.debug('Creating keypair') 
                if exists('.ssh/id_rsa.pub'):  # If file exists on remote m/c
                    self.logger.debug('Public key exists. Getting public key') 
                    get('.ssh/id_rsa.pub', '/tmp/')
                else:
                    self.logger.debug('Making .ssh dir')
                    run('mkdir -p .ssh')
                    self.logger.debug('Removing id_rsa*')
                    run('rm -f .ssh/id_rsa*')
                    self.logger.debug('Creating key using : ssh-keygen -f -t rsa -N') 
                    run('ssh-keygen -f %s -t rsa -N \'\'' % (rsa_pub_arg))
                    self.logger.debug('Getting the created keypair')
                    get('.ssh/id_rsa.pub', '/tmp/')
                self.logger.debug('Reading public key')
                pub_key = open('/tmp/id_rsa.pub', 'r').read()
                self.obj.keypairs.create(key_name, public_key=pub_key)
Example #21
File: dump.py Project: nifei/test
def collect_log_in_topo(topo):
    protocol = topo['protocol']
    if not os.path.isfile('./tmp/%s.tokens.csv' % topo['runId']):
        return None
    logs = {}
    with open('./tmp/%s.tokens.csv' % topo['runId'], 'rb') as cf:
        reader = csv.reader(cf)
        for row in reader:
            token = row[1]
            connection = topo['connections'][row[0]]
            server_log = '%s/log/%s.server.log'%(protocol,token)
            client_log = '%s/log/%s.client.log'%(protocol,token)
            topo['hosts'][connection['server']].setdefault('logs',[])
            topo['hosts'][connection['client']].setdefault('logs',[])
            topo['hosts'][connection['server']].setdefault('load.logs',[])
            topo['hosts'][connection['client']].setdefault('load.logs',[])
            if connection['type'] == 'Test':
                topo['hosts'][connection['server']]['logs'].append(server_log)
                topo['hosts'][connection['client']]['logs'].append(client_log)
                logs.setdefault(row[0],{})
                logs[row[0]]['server'] = server_log
                logs[row[0]]['client'] = client_log
            else:
                topo['hosts'][connection['server']]['load.logs'].append(server_log)
                topo['hosts'][connection['client']]['load.logs'].append(client_log)
        cf.close()
    for (host_id, host) in topo['hosts'].items():
        with settings(host_string='test@%s:%s'%(host['login_ip'],host['login_port']), shell=host['shell']):
            for log in host['logs']:
                get(log,log)
           # for log in host['load.logs']:
           #     get(log,log)
    return logs
Example #22
def get_kraken_config(server, instance):
    """Get kraken configuration of a given instance"""
    # TODO this task is never used and it looks like a function (inconsistent)

    instance = get_real_instance(instance)
    
    with settings(host_string=env.make_ssh_url(server)):
        config_path = "%s/%s/kraken.ini" % (env.kraken_basedir, instance.name)

        # first get the configfile here
        temp_file = StringIO.StringIO()
        if exists(config_path):
            get(config_path, temp_file)
        else:
            print(red("ERROR: can't find %s" % config_path))
            exit(1)

        config = ConfigParser.RawConfigParser(allow_no_value=True)
        config_text = temp_file.getvalue()
        config.readfp(BytesIO(config_text))

        if 'GENERAL' in config.sections():
            return config
        else:
            return None
Example #23
def get_database():
    dump_database()
    with cd(os.path.join('/home', env.home)):
        filename = 'i4p_db_%s.sql' % time.strftime('%Y%m%d')
        compressed_filename = '%s.bz2' % filename
        run('bzip2 -9 -c %s > %s' % (filename, compressed_filename))
        get(compressed_filename, 'current_database.sql.bz2')
Example #24
    def _provide_imported_keypair( self, ec2_keypair_name, private_key_path, overwrite_ec2=False ):
        """
        Expects to be running in a Fabric task context!

        Ensures that 1) a key pair has been imported to EC2 under the given name, 2) a matching
        private key exists on this box at the given path and 3) the corresponding public key
        exists at the given path with .pub appended.

        If there is no private key at the given path on this box, one will be created. If there
        already is a imported key pair in EC2, it is checked to match the local public key. If
        they don't match an exception will be raised (overwrite_ec2 is False) or the EC2 key pair
        will be replaced with a new one by importing the local public key. The public key itself
        will be tracked in S3. See _provide_generated_keypair for details.

        :param ec2_keypair_name: the name of the keypair in EC2
        :param private_key_path: the path to the private key on this box (tilde will be expanded)
        :return: the actual contents of the private and public keys as a tuple in that order
        """
        key_file_exists = run( 'test -f %s' % private_key_path, quiet=True ).succeeded
        if not key_file_exists:
            run( "ssh-keygen -N '' -C '%s' -f '%s'" % ( ec2_keypair_name, private_key_path ) )
        ssh_privkey = StringIO( )
        get( remote_path=private_key_path, local_path=ssh_privkey )
        ssh_privkey = ssh_privkey.getvalue( )
        ssh_pubkey = StringIO( )
        get( remote_path=private_key_path + '.pub', local_path=ssh_pubkey )
        ssh_pubkey = ssh_pubkey.getvalue( )
        self.ctx.register_ssh_pubkey( ec2_keypair_name, ssh_pubkey, force=overwrite_ec2 )
        return ssh_privkey, ssh_pubkey
Example #25
def _download_data():
    """
    Download the latest playgrounds data CSV.
    """
    print "Cloning database from %s..." % env.settings

    get(remote_path="%(repo_path)s/playgrounds.db" % env, local_path="playgrounds.db")
Example #26
def setup_chef_server(local_dir, cookbooks):
    _userize_file("/etc/chef-server/admin.pem")
    for cb in cookbooks:
        _use_cookbook(*cb)

    _userize_file("/etc/chef-server/chef-validator.pem")
    operations.get('~/chef-validator.pem', str(local_dir))
Example #27
def get_db_mysql(local_db_name, remote_db_name, dump_only=False):
    """
    dump db on server, import to local mysql (must exist)
    """
    create_mycnf()
    my_cnf_file = _get_my_cnf_name()
    date = datetime.datetime.now().strftime("%Y-%m-%d-%H%M")
    dump_name = 'dump_%s_%s-%s.sql' % (env.project_name, env.env_prefix, date)
    remote_dump_file = os.path.join(env.project_dir, dump_name)
    local_dump_file = './%s' % dump_name
    run('mysqldump'
        # for pg conversion!
        # ' --compatible=postgresql'
        # ' --default-character-set=utf8'
        ' --defaults-file={cnf_file}'
        ' {database} > {file}'.format(
            cnf_file=my_cnf_file,
            database=remote_db_name,
            file=remote_dump_file,
            **env
        )
    )
    get(remote_path=remote_dump_file, local_path=local_dump_file)
    run('rm %s' % remote_dump_file)
    if not dump_only:
        local('mysql -u root %s < %s' % (local_db_name, local_dump_file))
        local('rm %s' % local_dump_file)
Example #28
def downloadGlobaleaksAsset(file, asset_root, identity_file):
    u = env.host_string.split("@")[0]
    cmd = "ssh -f -i %s %s 'sudo cp %s /%s && sudo chown %s /%s'" % (
        identity_file,
        translate_host_string_ssh(env.host_string),
        os.path.join(asset_root, file),
        os.path.join("home", u),
        "%(u)s:%(u)s" % ({"u": u}),
        os.path.join("home", u, file),
    )

    with settings(hide("everything"), warn_only=True):
        local(cmd, capture=True)

        content = io.BytesIO()
        env.key_filename = identity_file

        with cd("/%s" % os.path.join("home", u)):
            get(file, content)

        cmd = "ssh -f -i %s %s 'rm /%s'" % (
            identity_file,
            translate_host_string_ssh(env.host_string),
            os.path.join("home", u, file),
        )

        local(cmd, capture=True)

    return content
Example #29
def dump_db(dumpfile="pootle_DB_backup.sql"):
    """Dumps the DB as a SQL script and downloads it"""
    require("environment", provided_by=[production, staging])

    if isdir(dumpfile):
        abort("dumpfile '%s' is a directory! Aborting." % dumpfile)

    elif not isfile(dumpfile) or confirm(
        "\n%s already exists locally. Do you want to overwrite it?" % dumpfile, default=False
    ):

        remote_filename = "%s/%s" % (env["project_path"], dumpfile)

        if not exists(remote_filename) or confirm(
            "\n%s already exists. Do you want to overwrite it?" % remote_filename, default=False
        ):

            print("\nDumping DB...")

            with settings(hide("stderr")):
                run(
                    "mysqldump -u %s %s %s > %s"
                    % (env["db_user"], env["db_password_opt"], env["db_name"], remote_filename)
                )
                get(remote_filename, ".")
                run("rm %s" % (remote_filename))
        else:
            abort("\nAborting.")
    else:
        abort("\nAborting.")
Example #30
    def clone_data(self, instance):
        dump_file = "%s.json" % str(int(time.time()))

        # Ignore errors on these next steps, so that we are sure we clean up
        # no matter what
        with settings(warn_only=True), cd(self.project_paths[instance]):
            # Dump the database to a file...
            self.run_management_command(
                instance,
                '%s --all > %s' % (self.dumpdata_command, dump_file)
            )

            # Then download that file, all uploaded files, and rm the dump file
            get("%s%s" % (self.project_paths[instance], dump_file), dump_file)
            self.run('rm %s' % dump_file)

            self.syncdb('local')
            self.local_management_command('flush --noinput')

            from django.db import connection, transaction

            cursor = connection.cursor()
            cursor.execute("DELETE FROM django_content_type;")

            for table in self.local_tables_to_flush:
                cursor.execute("DELETE FROM %s;" % table)

            transaction.commit_unless_managed()
            self.local_management_command('%s %s' % (self.loaddata_command,
                                                     dump_file))

        # ... then cleanup the dump file
        local('rm %s' % dump_file)
Example #31
    def read_file(config_host, config_path):
        data = StringIO()
        with settings(host_string='%s@%s' % (env.user, config_host)):
            with hide('stderr', 'stdout'):
                temp_dir = run('mktemp -d /tmp/prestoadmin.XXXXXXXXXXXXXX')
            try:
                get(config_path, data, use_sudo=True, temp_dir=temp_dir)
            finally:
                run('rm -rf %s' % temp_dir)
        data.seek(0)
        return data
Example #32
def _transfer_all_artifacts():
    services = "sessiond session_proxy pcrf ocs pipelined ingress"
    run(f'fab transfer_artifacts:services="{services}",'
        'get_core_dump=True', )
    # Copy log files out from the node
    local('mkdir cwf-artifacts')
    get('*.log', 'cwf-artifacts')
    if exists("coredump.tar.gz"):
        get('coredump.tar.gz', 'cwf-artifacts')
    local('sudo mkdir -p /tmp/logs/')
    local('sudo mv cwf-artifacts/* /tmp/logs/')
Example #33
def retrieve(appname=None):
    """fab -H username@host retrieve:appname"""
    appname = appname or os.path.split(os.getcwd())[-1]
    appfolder = applications + '/' + appname
    filename = '%s.zip' % appname
    with cd(appfolder):
        sudo('zip -r /tmp/%s *' % filename)
    get('/tmp/%s' % filename, filename)
    sudo('rm /tmp/%s' % filename)
    local('unzip %s' % filename)
    local('rm %s' % filename)
Example #34
def newrelic():
    """
    Check if newrelic is sending data to rpm.newrelic.com's data collector
    """
    if env.newrelic:
        print(green("newrelic is expected to be running on this host"))
        with prefix(env.activate):
            run("newrelic-admin validate-config {0}/{1}".format(env.path_release_current, env.newrelic['INI_FILE']))
            get("/tmp/python-agent-test.log", "{0}_data/{1}/%(path)s".format(env.project_name, env.host, env))
    else:
        print(red("We are not deploying newrelic on this host"))
Example #35
def get_test_summaries(
        gateway_host=None,
        test_host=None,
        dst_path="/tmp"):
    local('mkdir -p ' + dst_path)
    _switch_to_vm(gateway_host, "magma", "magma_dev.yml", False)
    with settings(warn_only=True):
        get(remote_path=TEST_SUMMARY_GLOB, local_path=dst_path)
    _switch_to_vm(test_host, "magma_test", "magma_test.yml", False)
    with settings(warn_only=True):
        get(remote_path=TEST_SUMMARY_GLOB, local_path=dst_path)
Example #36
def package_python_freeswitch():
    """Builds freeswitch and our fake package with missing python files.

    Freeswitch doesn't properly package some of our python dependencies, so we
    need to package these ourself. Unfortunately the only way to do this is
    build from source and manually hack a package together. See the following
    upstream bugs for more info; once these are resolved we should not need to
    do this anymore:
        https://jira.freeswitch.org/browse/ESL-99
        https://jira.freeswitch.org/browse/FS-5660

    Note this will build freeswitch based on what is currently checked out in
    the local freeswitch repo.  The tag we have used in the past is v1.4.6 and
    the FS repo itself is at https://stash.freeswitch.org/scm/fs/freeswitch.git
    """
    path = '/home/vagrant/freeswitch'
    if not exists(path):
        print 'path %s does not exist on the VM, cannot package' % path
        return
    package_name = 'python-freeswitch-endaga'
    with cd(path):
        version = run('git describe --tags').strip('v')
        run('./bootstrap.sh')
        get(remote_path='modules.conf', local_path='/tmp/modules.conf.orig')
        i = open('/tmp/modules.conf.orig', 'r')
        o = open('/tmp/modules.conf', 'w')
        for line in i:
            if (line.strip().startswith("#languages/mod_python")
                    or line.strip().startswith("#applications/mod_esl")):
                o.write(line[1:])
            else:
                o.write(line)
        o.close()
        put(remote_path='modules.conf', local_path='/tmp/modules.conf')
        run('./configure')
        run('make')

    with cd('%s/libs/esl' % path):
        run('make pymod')

    with cd(path):
        run('fpm'
            ' -n %s'
            ' -s dir'
            ' -t %s'
            ' -v %s'
            ' libs/esl/python/ESL.py=/usr/lib/python2.7/dist-packages/ESL.py'
            ' libs/esl/python/_ESL.so=/usr/lib/python2.7/dist-packages/_ESL.so'
            ' src/mod/languages/mod_python/freeswitch.py='
            '/usr/share/freeswitch/scripts/freeswitch.py' %
            (package_name, env.pkgfmt, version))
        run('mkdir -p ~/endaga-packages')
        run('mv %s*.%s ~/endaga-packages/' % (package_name, env.pkgfmt))
Example #37
def get_files(remote_path, local_path):
    path_with_host_name = os.path.join(local_path, env.host)

    if not os.path.exists(path_with_host_name):
        os.makedirs(path_with_host_name)

    _LOGGER.debug('local path used ' + path_with_host_name)

    try:
        get(remote_path, path_with_host_name, use_sudo=True)
    except SystemExit:
        warn('remote path ' + remote_path + ' not found on ' + env.host)
Example #38
def get_remote_file(remote_path: str) -> str:
    """Fetches remote file as a string.

        Expects authentication to have been performed already.

        :param remote_path: Remote file path.

    """
    file = BytesIO()
    get(remote_path, file)
    contents = file.getvalue().decode()
    return contents
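A one-line usage sketch (the remote path is illustrative):

    authorized_keys = get_remote_file('/home/deploy/.ssh/authorized_keys')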
Example #39
    def dump(self, file_name, index_name, type_name):

        with cd(env.project_home):
            self.virtualenv(
                'thor/dump.py -e %s -f %s -i %s -t %s' %
                (env.elastic_endpoint, file_name, index_name, type_name))

            # env.run('tar cvf %s.tar.gz ' % file_name)
            try:
                get('data/%s' % file_name, '../data/%s' % file_name)
            except Exception:
                pass
Example #40
def file_get(remote_path, local_path):
    path_with_host_name = os.path.join(local_path, env.host)

    if not os.path.exists(path_with_host_name):
        os.makedirs(path_with_host_name)

    _LOGGER.debug('local path used ' + path_with_host_name)

    if exists(remote_path, True):
        get(remote_path, path_with_host_name, True)
    else:
        warn('remote path ' + remote_path + ' not found on ' + env.host)
Example #41
def dump_db():
    tar_name = '{}.dump.{}.tar'.format(
        env.db_name, time.strftime("%Y-%m-%d-%H", time.localtime()))
    tar_fp = os.path.join('/tmp/', tar_name)
    if not exists(tar_fp):
        with cd('/tmp'):
            run('rm -rf {}.dump*'.format(env.db_name))
            run('mongodump -d {} -o buzz.dump'.format(env.db_name))
            run('tar -czvf {} {}.dump'.format(tar_name, env.db_name))

    get(tar_fp, 'backup/')
    run('rm -rf /tmp/{}.dump*'.format(env.db_name))
Example #42
def get_parsed_remote_conf(conf_name,
                           suffix="nginx",
                           use_sudo=True):  # type: (str, str, bool) -> [str]
    if not conf_name.endswith(".conf") and not exists(conf_name):
        conf_name += ".conf"
    # cStringIO.StringIO, StringIO.StringIO, TemporaryFile, SpooledTemporaryFile all failed :(
    tempfile = mkstemp(suffix)[1]
    get(remote_path=conf_name, local_path=tempfile, use_sudo=use_sudo)
    with open(tempfile, "rt") as f:
        conf = load(f)
    remove(tempfile)
    return conf
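`load` here is presumably an nginx config parser's loader (for example nginxparser's), which the snippet does not show; under that assumption:

    server_conf = get_parsed_remote_conf('/etc/nginx/sites-enabled/mysite')
    # server_conf is the parsed directive tree of mysite.conf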
Example #43
def load_domain_list(project_name):
    home_folder = '/home/{project_name}'.format(project_name=project_name)
    remote_domain_file_path = '{home_folder}/.domains'.format(
        home_folder=home_folder)
    if exists(remote_domain_file_path):
        domain_file = StringIO()
        get(remote_domain_file_path,
            domain_file,
            use_sudo=True,
            temp_dir='/tmp')
        return domain_file.getvalue().split()
    return []
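Usage sketch (the project name is hypothetical); the function returns an empty list when the host has no .domains file:

    for domain in load_domain_list('mysite'):
        print(domain)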
Example #44
    def test_upload_template_handles_file_destination(self):
        """
        upload_template() should work OK with file and directory destinations
        """
        template = self.mkfile('template.txt', '%(varname)s')
        local = self.path('result.txt')
        remote = '/configfile.txt'
        var = 'foobar'
        with hide('everything'):
            upload_template(template, remote, {'varname': var})
            get(remote, local)
        eq_contents(local, var)
Example #45
def _upgrade_docker(upd, with_testing):
    def alter_config(line):
        if not re.match(r'OPTIONS=.*', line):
            return line

        to_remove = (r'\s*(--log-level=[^\s\']+\s*)|(-l \[^\s\']+\s*)',
                     r'\s*(--log-driver=[^\s\']+)')
        for pattern in to_remove:
            line = re.sub(pattern, '', line)

        return re.sub(
            r"OPTIONS='(.*)'",
            r"OPTIONS='\1 --log-driver=json-file --log-level=error'", line)

    upd.print_log("Docker before pkg upgrade " + run("docker --version"))
    helpers.remote_install(SELINUX, with_testing)
    helpers.remote_install(DOCKER, with_testing)
    upd.print_log("Docker after pkg upgrade " + run("docker --version"))

    docker_config = StringIO()
    get('/etc/sysconfig/docker', docker_config)
    current_config = docker_config.getvalue()
    new_config = '\n'.join(
        alter_config(l) for l in current_config.splitlines())

    run("cat << EOF > /etc/sysconfig/docker\n{}\nEOF".format(new_config))

    run("mkdir -p /etc/systemd/system/docker.service.d/")
    run("cat << EOF > /etc/systemd/system/docker.service.d/timeouts.conf\n"
        "{}\nEOF".format(DOCKER_TIMEOUTS_DROPIN))

    # If we restart docker here then rest of node upgrade code will be
    # executed with fresh new docker (don't know whether this is good or bad)
    # and also will results in pods/containers restart at this moment, which
    # will produce lots of events and load on node.
    # If not, then docker will be old till node reboot at the end of upgrade.
    # So we probably could comment restart part (known to work ~ok)
    run("systemctl daemon-reload")
    start_time = time.time()
    # Because of bug in our package docker could be running again at this moment
    # Maybe this is because of rpm %systemd hooks or else, so ensure it stopped
    # again before restart to prevent timeouts
    upd.print_log("===== Docker.service restart timeout has been increased to "
                  "10 min, please, don't interrupt it before timeout ======")
    res = run("bash -c 'for i in $(seq 1 5); do systemctl stop docker; done; "
              "sleep 1; systemctl restart docker;'")
    upd.print_log(
        "Docker second_stop/restart took: {} secs".format(time.time() -
                                                          start_time))
    if res.failed:
        raise helpers.UpgradeError('Failed to restart docker. {}'.format(res))
    upd.print_log(run("docker --version"))
Example #46
def install_postgresql(ver=None):
    """
    Install PostgreSQL server
    """
    # simple settings helper http://pgtune.leopard.in.ua/
    assert ver in SUPPORT_POSTGRESQL_VERSIONS or ver is None
    check_sudo()
    check_os()
    if not confirm('Do you want to install PostgreSQL{}?'.format(
            ' {}'.format(ver) if ver else '')):
        return
    allow_versions = ', '.join(SUPPORT_POSTGRESQL_VERSIONS)
    while ver not in SUPPORT_POSTGRESQL_VERSIONS:
        ver = prompt(
            'Write PostgreSQL version you need ({}):'.format(allow_versions),
            default=SUPPORT_POSTGRESQL_VERSIONS[-1])
    print_green('INFO: Install PostgreSQL {}...'.format(ver))
    set_apt_repositories(POSTGRESQL_REPOSITORIES,
                         POSTGRESQL_REPOS_INSTALL_KEYS_COMMANDS,
                         subconf_name='postgres')
    apt_update()
    apt_install(
        'postgresql-{ver} postgresql-server-dev-{ver} libpq-dev'.format(
            ver=ver),
        noconfirm=True)
    set_postgresql_user_password(
        'postgres', password_prompt('Set password to superuser postgres'))
    la = prompt(
        'Set listen_addresses (hostname or ip, comma separated; set * for all)',
        default='localhost',
        validate='[\w\.\-\*]+').strip()
    t = BytesIO()
    postgresql_conf = '/etc/postgresql/{}/main/postgresql.conf'.format(ver)
    get(postgresql_conf, local_path=t, use_sudo=True)
    t = BytesIO(
        re.sub(br"#listen_addresses = 'localhost'",
               r"listen_addresses = '{}'".format(la).encode(), t.getvalue()))
    put(t, postgresql_conf, use_sudo=True)
    sudo('chown postgres:postgres {}'.format(postgresql_conf))
    sudo('chmod 644 {}'.format(postgresql_conf))
    hba = '/etc/postgresql/{}/main/pg_hba.conf'.format(ver)
    sed(hba, r'(local\s+all\s+all\s+)peer', r'\1md5', use_sudo=True)
    if confirm('Do you want to allow connect to PostgreSQL from out?'):
        append(
            hba,
            'host     all             all             0.0.0.0/0               md5',
            use_sudo=True)
    install_postgis(postgres_ver=ver)
    if confirm('Do you want to restart PostgreSQL?'):
        service_restart('postgresql')
    print_green('INFO: Install PostgreSQL {}... OK'.format(ver))
Example #47
def get_test_summaries(
    gateway_host=None,
    test_host=None,
    dst_path="/tmp",
):
    local('mkdir -p ' + dst_path)

    # TODO we may want to zip up all these files
    _switch_to_vm_no_provision(gateway_host, "magma", "magma_dev.yml")
    with settings(warn_only=True):
        get(remote_path=TEST_SUMMARY_GLOB, local_path=dst_path)
    _switch_to_vm_no_provision(test_host, "magma_test", "magma_test.yml")
    with settings(warn_only=True):
        get(remote_path=TEST_SUMMARY_GLOB, local_path=dst_path)
Example #48
def run_demo():
    run("git clone https://github.com/srcole/demo_OSG_python")
    with cd('demo_OSG_python'):
        run("chmod +x create_virtenv.sh")
        run("./create_virtenv.sh")
        run("rm -R python_virtenv_demo")
        run("mv lfp_set/ /stash/user/" + env.user + "/lfp_set/")
        run("tar -cvzf misshapen.tar.gz misshapen")
        run("rm -R misshapen")
        run("mkdir Log")
        run("condor_submit sub_PsTs.submit")
        # Need to wait until done running; should be less than 5 minutes
        time.sleep(300)
        get("./out*")
Example #49
def move_data(src, dest, data=''):
    """ 
    move database data with dumpdata & loaddata
    """
    _setup_env()
    filename = '%s_%s_%s.json' % (os.path.join(
        '/tmp/', env.project_name), data, env.role)
    dump_data(data, filename)

    dest_file = os.path.join('/tmp/', os.path.basename(filename))
    if not env.is_dev:
        get(os.path.join(env.stage['path'], os.path.basename(filename)), dest_file)

        upload_file(dest_file, dest)
Example #50
def _deploy_lte_packages(repo: str, magma_root: str):
    repo_name = _get_repo_name(repo)

    # Grab all the build artifacts we need from the CI node
    get('/tmp/packages.tar.gz', 'packages.tar.gz')
    get('/tmp/packages.txt', 'packages.txt')
    get('/tmp/magma_version', 'magma_version')
    get(
        f'{repo_name}/{magma_root}/lte/gateway/release/magma.lockfile.debian',
        'magma.lockfile.debian',
    )

    with open('magma_version') as f:
        magma_version = f.readlines()[0].strip()
    s3_path = f's3://magma-images/gateway/{magma_version}'
    local(
        f'aws s3 cp packages.txt {s3_path}.deplist '
        f'--acl bucket-owner-full-control',
    )
    local(
        f'aws s3 cp magma.lockfile.debian {s3_path}.lockfile.debian '
        f'--acl bucket-owner-full-control',
    )
    local(
        f'aws s3 cp packages.tar.gz {s3_path}.deps.tar.gz '
        f'--acl bucket-owner-full-control',
    )
Example #51
def new_log():
    """Show only lines since the last time the log was echoed."""
    if os.path.exists("lnt.log"):
        lines = {line for line in open("lnt.log", 'r').readlines()}
    else:
        lines = set()
    with hide('warnings'):
        get(
            '/srv/lnt/install/lnt.log',
            'lnt.log',
        )
    new_lines = {line for line in open("lnt.log", 'r').readlines()}
    for l in new_lines - lines:
        print ' '.join(l.split()[2:]),
Example #52
def download(url):
    result = run('which youtube-dl', warn_only=True)
    if result == '':
        raise ProgramNotFound

    tmpdir = run('mktemp -d /tmp/ytsnarf-tmp-XXXXXXXX')
    outdir = os.path.join('/tmp', tmpdir)
    result = run('youtube-dl --no-color --output={}/%\(title\)s.%\(ext\)s {}'.format(outdir, url), warn_only=True)
    if 'ERROR' in result:
        run('rm -rf {}'.format(outdir))
        raise YoutubeDLError(result)
    else:
        get(os.path.join(outdir, '*'), local_path='.')
        run('rm -rf {}'.format(outdir))
Example #53
def _node_move_config():
    config = 'kuberdock.json'
    old_path = os.path.join(
        "/usr/libexec/kubernetes/kubelet-plugins/net/exec/kuberdock/", config)
    new_path = os.path.join(NODE_DATA_DIR, config)
    with quiet():
        run("mv {} {}".format(old_path, new_path))
    fd = StringIO()
    get(new_path, fd)
    data = json.loads(fd.getvalue())
    data['network_interface'] = NODE_TOBIND_EXTERNAL_IPS
    new_fd = StringIO()
    json.dump(data, new_fd)
    put(new_fd, new_path)
Example #54
    def _create_keypair(self):
        username = self._inputs.host_data[self._inputs.cfgm_ip]['username']
        password = self._inputs.host_data[self._inputs.cfgm_ip]['password']
        with settings(
                host_string='%s@%s' % (username, self._inputs.cfgm_ip),
                password=password, warn_only=True, abort_on_prompts=True):
            rsa_pub_arg = '.ssh/id_rsa'
            if exists('.ssh/id_rsa.pub'):  # If the file exists on the remote machine
                get('.ssh/id_rsa.pub', '/tmp/')
            else:
                run('mkdir -p .ssh')
                run('rm -f .ssh/id_rsa*')
                run('ssh-keygen -f %s -t rsa -N \'\'' % (rsa_pub_arg))
                get('.ssh/id_rsa.pub', '/tmp/')
Example #55
def replicate_acls(remote_configuration=None,
                   origin=None,
                   path=None,
                   apply=False):
    """ Replicate locally the ACLs and Unix permissions of a given path on
        a remote machine. When everything is borked locally, and you have
        a remote clone in a good health, this is handy.

        This helps correcting strange permissions errors from a well-known
        good machine.

        Usage:

            fab … acls:origin=10.0.3.37,path=/bin
            # Then:
            fab … acls:origin=10.0.3.37,path=/bin,apply=True
    """

    if origin is None or path is None:
        raise ValueError('Missing arguments')

    sys_admin_pkgs()

    local_perms_file = str(uuid.uuid4())
    remote_perms_file = str(uuid.uuid4())

    with cd('/tmp'):

        # GET correct permissions form origin server.
        with settings(host_string=origin):
            with cd('/tmp'):
                sudo('getfacl -pR "%s" > "%s"' % (path, remote_perms_file))
                get(remote_perms_file, '/tmp')
                sudo('rm "%s"' % remote_perms_file, quiet=True)

        if env.host_string != 'localhost':
            # TODO: find a way to transfer from one server to another directly.
            put(remote_perms_file)

        # gather local permissions, compare them, and reapply
        sudo('getfacl -pR "%s" > "%s"' % (path, local_perms_file))
        sudo('colordiff -U 3 "%s" "%s" || true' %
             (local_perms_file, remote_perms_file))

        sudo('setfacl --restore="%s" %s || true' %
             (remote_perms_file, '' if apply else '--test'))

        sudo('rm "%s" "%s"' % (remote_perms_file, local_perms_file),
             quiet=True)
Example #56
def _package_vagrant_zip(service, folder, cloud_host, commit_hash):
    if cloud_host != "":
        env.host_string = cloud_host
        (env.user, _, _) = split_hoststring(cloud_host)
    else:
        _vagrant()

    run("rm -rf %s" % folder)
    run("mkdir -p %s" % folder)

    with cd('magma/orc8r/cloud/deploy'):
        run('cp -pr aws/%s_appspec.yml %s/appspec.yml' % (service, folder))
        run('cp -pr aws/scripts %s/.' % folder)
        run("mkdir -p %s/ansible/roles" % folder)
        run('cp -pr %s.yml %s/ansible/main.yml' % (service, folder))
        run('cp -pr roles/aws_setup %s/ansible/roles/.' % folder)
        run('cp -pr roles/osquery %s/ansible/roles/.' % folder)
        run('cp -pr roles/service_registry %s/ansible/roles/.' % folder)

        if service == "metrics":
            run('cp -pr roles/prometheus %s/ansible/roles/.' % folder)
        else:
            run('cp -pr roles/%s %s/ansible/roles/.' % (service, folder))

        if service == "controller":
            run('cp -pr /etc/magma %s/configs' % folder)
            run('cp -pr files/scripts/setup_swagger_ui %s/scripts/.' % folder)
            run('cp -pr files/static/apidocs %s/.' % folder)
        if service == "proxy":
            run('cp -pr /etc/magma %s/configs' % folder)
            run('cp -pr roles/disk_metrics %s/ansible/roles/.' % folder)
            run('cp -pr ../../../orc8r/tools/ansible/roles/pkgrepo '
                '%s/ansible/roles/.' % folder)

    # Build Go binaries and plugins
    build()
    if service == "metrics":
        run("make -C magma/orc8r/cloud/go/services/metricsd/prometheus/prometheus-cache build"
            )

    run('cp -pr go/plugins %s' % folder)
    _copy_go_binaries(service, folder)

    pkg_name = "magma_%s_%s" % (service, commit_hash)
    with cd(folder):
        run('zip -r %s *' % (pkg_name))
    get('%s/%s.zip' % (folder, pkg_name), '%s/%s.zip' % (folder, pkg_name))
    run('rm -rf %s' % folder)
    return "%s.zip" % pkg_name
Example #57
    def test_upload_template_handles_template_dir(self):
        """
        upload_template() should work OK with template dir
        """
        template = self.mkfile('template.txt', '%(varname)s')
        template_dir = os.path.dirname(template)
        local = self.path('result.txt')
        remote = '/configfile.txt'
        var = 'foobar'
        with hide('everything'):
            upload_template('template.txt',
                            remote, {'varname': var},
                            template_dir=template_dir)
            get(remote, local)
        eq_contents(local, var)
Example #58
def upsert_upload(new_conf, name="default", use_sudo=True):
    conf_name = "/etc/nginx/sites-enabled/{nginx_conf}".format(nginx_conf=name)
    if not conf_name.endswith(".conf") and not exists(conf_name):
        conf_name += ".conf"
    # cStringIO.StringIO, StringIO.StringIO, TemporaryFile, SpooledTemporaryFile all failed :(
    tempfile = mkstemp(name)[1]
    get(remote_path=conf_name, local_path=tempfile, use_sudo=use_sudo)
    with open(tempfile, "rt") as f:
        conf = load(f)
    new_conf = new_conf(conf)
    remove(tempfile)

    sio = StringIO()
    sio.write(dumps(new_conf))
    return put(sio, conf_name, use_sudo=use_sudo)
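new_conf is a callable that receives the parsed config and returns the version to upload; a hedged sketch (the directive and site name are made up, and load/dumps are assumed to come from an nginx config parser):

    def enable_gzip(conf):
        conf.append(['gzip', 'on'])
        return conf

    upsert_upload(enable_gzip, name='mysite')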
Example #59
def get_remote_dump(filename='/tmp/db_dump.sql',
                    local_filename='./db_dump.sql',
                    rsync=True):
    """ do a remote database dump and copy it to the local filesystem """
    # future enhancement, do a mysqldump --skip-extended-insert (one insert
    # per line) and then do rsync rather than get() - less data transferred on
    # however rsync might need ssh keys etc
    require('user', 'host', provided_by=env.valid_envs)
    if rsync:
        _tasks('dump_db:' + filename + ',for_rsync=true')
        local("rsync -vz -e 'ssh -p %s' %s@%s:%s %s" %
              (env.port, env.user, env.host, filename, local_filename))
    else:
        _tasks('dump_db:' + filename)
        get(filename, local_path=local_filename)
    sudo_or_run('rm ' + filename)
Example #60
def replicate_media_on_local():
    require('stage', provided_by=(staging, production))
    with cd('/home/{}/{}'.format(env.user, ocean)):
        with prefix(". .env"):
            run('docker-compose -f {0} exec -T'
                ' web tar -czvf ./media.tar.gz  media'.format(
                    env.docker_compose_file))
            web_container = run(
                'echo $(docker-compose -f {} ps -q web)'.format(
                    env.docker_compose_file))
            run('docker cp {web_container}:/{ocean}/media.tar.gz ./'.format(
                web_container=web_container, ocean=ocean))
            if exists('media.tar.gz'):
                get('media.tar.gz', '%(dirname)s')
    local('tar -xzvf ./media.tar.gz')
    local('rm media.tar.gz')