Example #1
def deploy(new_apps=True, new_app_conf=True):
    #  test configuration start
    puts_green('Starting DEPLOY...', bg=107)
    if not test_configuration():
        if not console.confirm("Configuration test %s! Do you want to continue?" % red('failed'), default=False):
            abort("Aborting at user request.")
    #  test configuration end
    _verify_sudo()
    if env.ask_confirmation:
        if not console.confirm("Are you sure you want to deploy in %s?" % red(env.project.upper()), default=False):
            abort("Aborting at user request.")
    puts_green('Start deploy...')
    start_time = datetime.now()

    if env.repository_type == 'hg':
        hg_pull()
    else:
        git_pull()
    if new_apps:
        _install_requirements()
    if new_app_conf:
        _upload_nginx_conf()
        _upload_rungunicorn_script()
        _upload_supervisord_conf()

    _deploy_django_project()
    _collect_static()

    _prepare_media_path()  # kept because it may be changed in a code review
    _supervisor_restart()

    end_time = datetime.now()
    finish_message = '[%s] Correctly deployed in %i seconds' % \
                     (green(end_time.strftime('%H:%M:%S')), (end_time - start_time).seconds)
    puts(finish_message)
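Note: puts_green and red are not Fabric built-ins (red/green likely come from fabric.colors). A minimal sketch of what the custom helper might look like, assuming bg is an ANSI background color code:

# Hypothetical helper assumed by the task above; not part of Fabric itself.
from fabric.utils import puts

def puts_green(text, bg=49):
    # 32 = ANSI green foreground; bg (e.g. 107 = bright white) sets the background
    puts('\033[32;%dm%s\033[0m' % (bg, text))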
Example #2
def download():
    f = env._ui.file
    s = env._ui.url

    if os.path.isfile(f):
        newf = False
    else:
        puts('[{0}]: "{1}" file does not exist'.format(env.msgid, f))
        newf = download_file(f, s)

    mtime = file_timestamp(f)

    if mtime < time.time() - env.ACCEPTABLE:
        # if mtime is less than now - n days, it may be stale.

        newtime = time.time() - (env.ACCEPTABLE / 2)

        if newf is True:
            # if we just downloaded the file it isn't stale yet
            os.utime(f, (newtime, newtime))
        else:
            # definitely stale, must download it again.
            newf = download_file(f, s)
            if mtime == file_timestamp(f):
                # if the source is stale, modify mtime so we don't
                # download it for a few days.
                os.utime(f, (newtime, newtime))
    else:
        # otherwise, mtime is within the window of n days, and we can do nothing.
        puts('[{0}]: "{1}" is up to date'.format(env.msgid, f))
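file_timestamp and download_file are project-specific helpers. A plausible minimal sketch, with behavior inferred from the calling code (names and return values assumed, not confirmed):

import os
import urllib

def file_timestamp(path):
    # modification time in seconds since the epoch
    return os.stat(path).st_mtime

def download_file(path, url):
    # fetch url into path; the caller treats a True return as "newly downloaded"
    urllib.urlretrieve(url, path)
    return True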
Example #3
def install():
    """
    Task to run all the install tasks.
    """

    current_platform = check_platform()
    check_python_version()
    check_packages(current_platform)

    clone()

    setup_virtual_env()
    all_deps()

    puts(green('\nOK, looking good. Now let\'s configure our settings.'))

    db_host = prompt("Enter the address of your database server. ", default="localhost")
    db_user = prompt("Enter your database user name. ")
    db_password = getpass.getpass("Enter your database user's password. ")
    db_database = prompt("Enter the name of your database. ")
    db_port = prompt("Enter the port for the database. ", default="3306")

    check_mysql_connection(db_user, db_password, db_host, db_database, db_port)
    create_local_settings(db_user, db_password, db_host, db_database, db_port)

    domain = prompt("Enter the domain for your site. [example: myawesometour.com] ")
    title = prompt("Enter the title of your tour. [example: My Awesome Tour] ")

    setup_application(domain)

    apache_config(domain)

    setup_client(domain, title)

    chown()
Example #4
def run(command, shell=True, pty=True, combine_stderr=None, quiet=False,
        warn_only=False, stdout=None, stderr=None, timeout=None,
        shell_escape=None):
    result = None

    env.disable_known_hosts = True

    if (not stdout) and (not stderr):
        stdout, stderr = log.stdio()
    try:
        result = fabric_run(command, shell=shell, pty=pty,
                            combine_stderr=combine_stderr, quiet=quiet,
                            warn_only=warn_only, stdout=stdout,
                            stderr=stderr, timeout=timeout,
                            shell_escape=shell_escape)

    except:
        puts("[%s] %s %s" % (env.host_string, command, red("failed")))
        if hasattr(stdout, "print_recent"):
            stdout.print_recent()
        raise
    else:
        puts("[%s] %s %s" % (env.host_string, command, green("success")))

    return result
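Because the wrapper keeps the signature of fabric.api.run, call sites need no changes; a hypothetical task using it:

def uptime():
    # on failure the wrapper prints the recent output and re-raises
    result = run('uptime', warn_only=True)
    puts(result)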
Example #5
def check_for_dependencies():
    current_platform = check_platform()
    check_python_version()
    check_packages(current_platform)

    puts(green("\nOK you're all set. now run the following:"))
    puts(cyan("fab install"))
Example #6
def build(treeish='head'):
    """Build a release."""
    version = local("git describe {}".format(treeish), capture=True)

    with settings(hide('warnings'), warn_only=True):
        cmd = "git diff-index --quiet {} --".format(treeish)
        is_committed = local(cmd).succeeded
        cmd = "git branch -r --contains {}".format(version)
        is_pushed = local(cmd, capture=True)

    if not is_committed:
        prompt = "Uncommitted changes. Continue?"
        if not confirm(prompt, default=False):
            abort("Canceled.")

    if not is_pushed:
        prompt = "Commit not pushed. Continue?"
        if not confirm(question=prompt, default=False):
            abort("Canceled.")

    output = "/tmp/{}.tar.gz".format(version)
    prefix = "{}/".format(version)
    cmd = "git archive --prefix={prefix} --format=tar.gz --output={output} {version}:src"
    local(cmd.format(prefix=prefix, output=output, version=version))
    puts("\nBuilt: {} at: {}".format(version, output))
    return output
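From the command line, the treeish argument is passed with Fabric's colon syntax (tag name illustrative):

fab build:treeish=v1.2.0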
Example #7
def apache_config(domain):
    """
    Create a sample Apache config for the user.
    $path = OTB_DIR
    $domain # will need to add `api.` for django
    $proc = maybe 4 random chars
    $client-path = CLIENT_DIR
    """

    # Just in case the user put in the domain like `http://awesometour.com`
    domain = domain.replace("http://", "").strip("/")

    # A short set of random characters to append to the name of the
    # wsgi process name
    proc = ''.join(choice(lowercase) for i in range(4))

    puts(green("Creating sample Apache config..."))
    config_file = open('%sapache/otb.conf' % OTB_DIR, 'w+')

    for line in open('%sapache/otb.conf.dist' % OTB_DIR, 'r'):
        line = line.replace('$path', OTB_DIR)
        line = line.replace('$domain', domain)
        line = line.replace('$proc', proc)
        line = line.replace('$client-path', CLIENT_DIR)
        config_file.write(line)

    config_file.close()
Example #8
def block_devices():
    procfile = sudo("cat /proc/partitions").splitlines()
    procfile.pop(0)
    procfile.pop(0)
    parts = [p.split() for p in procfile]

    mnt = sudo("mount -v").split('\n')
    mounts = [p.split() for p in mnt]
    mountvalid = {}
    for p in mounts:
        if (p[0] != 'none'):
            mountvalid[p[0]] = p
    inf = []
    for device in parts:
        dev = {}
        if ('/dev/' + device[3] in mountvalid):
            dev['mountpoint'] = mountvalid['/dev/' + device[3]][2]
            try:
                s = os.statvfs(dev['mountpoint'])
                dev['size'] = s.f_bsize * s.f_blocks
                dev['used'] = s.f_bsize * (s.f_blocks - s.f_bavail)
            except OSError:
                print 'OSError'
        else:
            dev['mountpoint'] = ''
            dev['size'] = int(device[2]) * 1024
            dev['used'] = -1
        dev['device'] = '/dev/' + device[3]
        inf.append(dev)
    puts(inf)
    return inf
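Each entry in the returned list is a dict; a representative element (values illustrative only):

{'device': '/dev/sda1', 'mountpoint': '/', 'size': 21003583488, 'used': 9122578432}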
Example #9
def network_config():
    def getDhcpInfo(device):
        info = {'address': 'none', 'netmask': 'none', 'gateway': 'none'}
        mnt = sudo('LC_ALL=C ifconfig ' + device)
        match = re.search(r'inet addr:(\S+).*mask:(\S+)', mnt, re.I)
        if match:
            info['address'] = match.group(1)
            info['netmask'] = match.group(2)
        mnt = sudo('route -n ')
        match = re.search(r'^0.0.0.0\s+(\S+).*' + re.escape(device),
                          mnt, re.I | re.M)
        if match:
            info['gateway'] = match.group(1)
        return info

    inf = []
    mnt = sudo('cat /etc/network/interfaces | egrep -v "^\s*(#|$)"')
    devnets = mnt.split('auto')
    for devnet in devnets:
        if len(devnet) > 0:
            net = devnet.splitlines()
            dev = {}
            element = net[0].strip()
            if element in iface_list():
                dev['name'] = element
                dev['dhcp'] = "true"
                dev['address'] = "none"
                dev['netmask'] = "none"
                dev['gateway'] = "none"
                dev['default'] = "false"
                dev['virtual'] = "false"
                dev['bond-mode'] = "none"
                dev['bond-miimon'] = "none"
                dev['bond-master'] = "none"
                for e in net:
                    params = e.strip().split(' ')
                    if params[len(params) - 1] == 'dhcp':
                        dev['dhcp'] = "true"
                    if params[len(params) - 1] == 'static':
                        dev['dhcp'] = "false"
                    if params[0] == 'address':
                        dev['address'] = params[1]
                    if params[0] == 'netmask':
                        dev['netmask'] = params[1]
                    if params[0] == 'gateway':
                        dev['gateway'] = params[1]
                    if params[0] == 'bridge_ports':
                        dev['virtual'] = "true"
                    if params[0] == 'bond-mode':
                        dev['bond-mode'] = params[1]
                    if params[0] == 'bond-miimon':
                        dev['bond-miimon'] = params[1]
                    if params[0] == 'bond-master':
                        dev['bond-master'] = params[1]
                if dev['dhcp'] == 'true':
                    dev.update(getDhcpInfo(dev['name']))
            if len(dev) > 0:
                inf.append(dev)
    puts(inf)
    return inf
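The parser splits /etc/network/interfaces on the keyword "auto" and walks classic Debian ifupdown stanzas; for example, a static entry it would recognize:

auto eth0
iface eth0 inet static
    address 192.168.1.10
    netmask 255.255.255.0
    gateway 192.168.1.1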
Example #10
def setup():
    #  test configuration start
    if not test_configuration():
        if not console.confirm("Configuration test %s! Do you want to continue?" % red_bg('failed'), default=False):
            abort("Aborting at user request.")
    #  test configuration end
    if env.ask_confirmation:
        if not console.confirm("Are you sure you want to setup %s?" % red_bg(env.project.upper()), default=False):
            abort("Aborting at user request.")
    puts(green_bg('Start setup...'))
    start_time = datetime.now()

    _verify_sudo()
    _install_dependencies()
    _create_django_user()
    _setup_directories()
    _git_clone()
    _install_virtualenv()
    _create_virtualenv()
    _install_gunicorn()
    _install_requirements()
    _upload_nginx_conf()
    _upload_rungunicorn_script()
    _upload_supervisord_conf()

    end_time = datetime.now()
    finish_message = '[%s] Correctly finished in %i seconds' % \
    (green_bg(end_time.strftime('%H:%M:%S')), (end_time - start_time).seconds)
    puts(finish_message)
Example #11
def update():
    #  test configuration start
    if not test_configuration():
        if not console.confirm("Configuration test %s! Do you want to continue?" % red_bg('failed'), default=False):
            abort("Aborting at user request.")
    #  test configuration end
    _verify_sudo()
    if env.ask_confirmation:
        if not console.confirm("Are you sure you want to deploy in %s?" % red_bg(env.project.upper()), default=False):
            abort("Aborting at user request.")
    puts(green_bg('Start deploy...'))
    start_time = datetime.now()

    if 'synced_projectdir' not in env or not env.synced_projectdir:
        git_pull()
    # _install_requirements()
    # _upload_nginx_conf()
    # _upload_rungunicorn_script()
    # _upload_supervisord_conf()
    _prepare_django_project()
    _prepare_media_path()
    _supervisor_restart()

    end_time = datetime.now()
    finish_message = '[%s] Correctly deployed in %i seconds' % \
    (green_bg(end_time.strftime('%H:%M:%S')), (end_time - start_time).seconds)
    puts(finish_message)
Example #12
def rm_old_builds(path=None, user=None):
    '''Remove old build directories on the deploy server.

    Takes the same path and user options as **deploy**.
    '''
    configure(path=path, user=user)
    with cd(env.remote_path):
        with hide('stdout'):  # suppress ls/readlink output
            # get directory listing sorted by modification time (single-column for splitting)
            dir_listing = sudo('ls -t1', user=env.remote_acct)
            # get current and previous links so we don't remove either of them
            current = sudo('readlink current', user=env.remote_acct) if files.exists('current') else None
            previous = sudo('readlink previous', user=env.remote_acct) if files.exists('previous') else None

        # split dir listing on newlines and strip whitespace
        dir_items = [n.strip() for n in dir_listing.split('\n')]
        # regex based on how we generate the build directory:
        #   project name, numeric version, optional pre/dev suffix, optional revision #
        build_dir_regex = r'^%(project)s-[0-9.]+(-[A-Za-z0-9_-]+)?(-r[0-9]+)?$' % env
        build_dirs = [item for item in dir_items if re.match(build_dir_regex, item)]
        # by default, preserve the 3 most recent build dirs from deletion
        rm_dirs = build_dirs[3:]
        # if current or previous for some reason is not in the 3 most recent,
        # make sure we don't delete it
        for link in [current, previous]:
            if link in rm_dirs:
                rm_dirs.remove(link)

        if rm_dirs:
            for build_dir in rm_dirs:
                sudo('rm -rf %s' % build_dir, user=env.remote_acct)
        else:
            puts('No old build directories to remove')
Example #13
def create_database(name, owner=None, owner_host="localhost", charset="utf8", collate="utf8_general_ci", **kwargs):
    """
    Create a MySQL database.

    Example::

        import fabtools

        # Create DB if it does not exist
        if not fabtools.mysql.database_exists('myapp'):
            fabtools.mysql.create_database('myapp', owner='dbuser')

    """
    with settings(hide("running")):

        _query(
            "CREATE DATABASE %(name)s CHARACTER SET %(charset)s COLLATE %(collate)s;"
            % {"name": name, "charset": charset, "collate": collate},
            **kwargs
        )

        if owner:
            _query(
                "GRANT ALL PRIVILEGES ON %(name)s.* TO '%(owner)s'@'%(owner_host)s' WITH GRANT OPTION;"
                % {"name": name, "owner": owner, "owner_host": owner_host},
                **kwargs
            )

    puts("Created MySQL database '%s'." % name)
Example #14
def create(dbuser=None, dbname=None):
    """
    Create a MySQL Database and User: db.mysql.create:dbuser,dbname

    Example: db.mysql.create:myproject,myproject

    The password will be randomly generated.
    *  Run once.
    ** This command must be executed by a sudoer.
    """
    dbuser = dbuser or env.PROJECT.appname
    dbname = dbname or env.PROJECT.appname

    password = run('makepasswd --chars 32')
    assert(len(password) == 32)  # Ouch!

    sudo("mysql --defaults-file=/root/.my.cnf -e \"CREATE DATABASE %(dbname)s;\"" % locals())

    sudo("mysql --defaults-file=/root/.my.cnf -e \"CREATE USER '%(dbuser)s'@'localhost' IDENTIFIED BY '%(password)s';\"" % locals())
    sudo("mysql --defaults-file=/root/.my.cnf -e \"GRANT ALL PRIVILEGES ON %(dbname)s.* TO '%(dbuser)s'@'localhost';\"" % locals())

    # Persist database password
    my_cfg = "[client]\ndatabase=%s\nuser=%s\npassword=%s" % (dbname, dbuser, password)
    sudo("echo '%s' > %s/.my.cnf" % (my_cfg, env.PROJECT.share))
    sudo('chmod 640 %(share)s/.my.cnf' % env.PROJECT)
    sudo('chown %(user)s %(share)s/.my.cnf' % env.PROJECT)
    sudo('chgrp www-data %(share)s/.my.cnf' % env.PROJECT)

    db_url = 'mysql://%(dbuser)s:%(password)s@localhost/%(dbname)s' % locals()
    puts(yellow('DATABASE_URL => ' + db_url))
    return db_url
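The persisted .my.cnf lets the application connect without hard-coding credentials, e.g. with MySQLdb (path illustrative):

import MySQLdb

conn = MySQLdb.connect(read_default_file='/srv/myproject/share/.my.cnf')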
Example #15
def server(hostname, fqdn, email):
    '''
    Setup a new server: server:hostname,fqdn,email

    Example: server:palmas,palmas.dekode.com.br,[email protected]
    '''
    puts(green('Server setup...'))

    scripts = Path(__file__).parent.child('scripts')

    files = [
        scripts.child('server_setup.sh'),
        scripts.child('postfix.sh'),
        scripts.child('watchdog.sh'),
        scripts.child('uwsgi.sh'),
    ]

    # Choose database
    answer = ask('Which database to install? [P]ostgres, [M]ysql, [N]one ',
        options={
            'P': [scripts.child('pg_hba.conf'), scripts.child('postgresql.sh')],
            'M': [scripts.child('mysql.sh')],
            'N': []})

    files.extend(answer)

    # Create superuser
    if 'Y' == ask('Create superuser? [Y]es or [N]o ', options=('Y', 'N')):
        createuser.run(as_root=True)

    # Upload files and fixes execution mode
    for localfile in files:
        put(localfile, '~/', mirror_local_mode=True)

    run('~root/server_setup.sh %(hostname)s %(fqdn)s %(email)s' % locals())
Example #16
def venv_load():
    """
    Load an archived virtualenv

    The task will extract an archived virtual environment created with
    :meth:`~venv_dump`. Normally this command is invoked
    indirectly via the compound task :meth:`inveniofab.compound.load` which
    takes care of loading the database after extracting the virtual
    environment.
    """
    puts(cyan(">>> Loading archived virtualenv..."))

    ctx = {
        'dirname': os.path.dirname(env.CFG_INVENIO_PREFIX),
        'basename': os.path.basename(env.CFG_INVENIO_PREFIX),
    }

    archive_file = "%(dirname)s/%(basename)s.tar.gz" % ctx
    if not exists_local(archive_file):
        abort(red("Archived virtualenv does not exists - cannot continue") % env)

    # Remove previous installation
    if exists_local(env.CFG_INVENIO_PREFIX):
        res = confirm("Remove installation in %(CFG_INVENIO_PREFIX)s ?" % env)
        if not res:
            abort(red("Cannot continue") % env)
        else:
            sudo_local("rm -Rf %(CFG_INVENIO_PREFIX)s" % env)

    cmds = [
        "cd %(dirname)s",
        "tar -xvzf %(basename)s.tar.gz",
    ]
    sudo_local(" && ".join(cmds) % ctx, user=env.CFG_INVENIO_USER)
Example #17
def repo_setup(repo, ref):
    """ Clone repository """
    puts(cyan(">>> Setting up repository %s with ref %s..." % (repo, ref)))

    topsrcdir = repo_check(repo, check_path=False, workdir=False)
    workdir = repo_check(repo, check_path=False, workdir=True)
    gitdir = os.path.join(topsrcdir, '.git')

    if not os.path.exists(env.CFG_SRCDIR):
        res = confirm("Create repository root %s?" % env.CFG_SRCDIR)
        if not res:
            abort(red("Cannot continue") % env)
        else:
            local("mkdir -p %s" % env.CFG_SRCDIR)

    if not os.path.exists(gitdir) and os.path.exists(topsrcdir):
        res = confirm("Remove %s (it does not seem to be a git repository)?" % topsrcdir)
        if not res:
            abort(red("Cannot continue") % env)
        else:
            local("rm -Rf %s" % topsrcdir)

    if not os.path.exists(gitdir):
        git_clone(repo)
    if not os.path.exists(workdir):
        git_newworkdir(repo)
    git_checkout(repo, ref)
    repo_prepare(repo)
Example #18
def create_database(name, owner=None, owner_host='localhost', charset='utf8',
                    collate='utf8_general_ci', **kwargs):
    """
    Create a MySQL database.

    Example::

        import fabtools

        # Create DB if it does not exist
        if not fabtools.mysql.database_exists('myapp'):
            fabtools.mysql.create_database('myapp', owner='dbuser')

    """
    with settings(hide('running')):

        query("CREATE DATABASE %(name)s CHARACTER SET %(charset)s COLLATE %(collate)s;" % {
            'name': name,
            'charset': charset,
            'collate': collate
        }, **kwargs)

        if owner:
            query("GRANT ALL PRIVILEGES ON %(name)s.* TO '%(owner)s'@'%(owner_host)s' WITH GRANT OPTION;" % {
                'name': name,
                'owner': owner,
                'owner_host': owner_host
            }, **kwargs)

    puts("Created MySQL database '%s'." % name)
Example #19
def venv_dump():
    """
    Archive a virtualenv

    The task will create an archive ``<virtualenv name>.tar.gz`` of the entire
    virtual environment. If an existing archive already exists, the user will
    be asked for confirmation to remove it. Normally this command is invoked
    indirectly via the compound task :meth:`inveniofab.compound.dump` which
    takes care of dumping the database prior to archiving the virtual
    environment.
    """
    puts(cyan(">>> Creating archive of virtualenv in %(CFG_INVENIO_PREFIX)s..." % env))

    ctx = {
        'dirname': os.path.dirname(env.CFG_INVENIO_PREFIX),
        'basename': os.path.basename(env.CFG_INVENIO_PREFIX),
    }

    archive_file = "%(dirname)s/%(basename)s.tar.gz" % ctx
    if exists_local(archive_file):
        res = confirm("Existing archive already exists - remove?")
        if not res:
            abort(red("Cannot continue") % env)
        else:
            sudo_local("rm -Rf %s" % archive_file, user=env.CFG_INVENIO_USER)

    cmds = [
        "cd %(dirname)s",
        "tar -cvzf %(basename)s.tar.gz %(basename)s",
    ]
    sudo_local(" && ".join(cmds) % ctx, user=env.CFG_INVENIO_USER)
Example #20
def buildinfo_hash():
    conf = get_conf()

    fn = os.path.join(conf.build.paths.projectroot,
                      conf.build.paths.includes,
                      'hash.rst')

    generate_hash_file(fn)

    if conf.project.name == 'manual':
        release_fn = os.path.join(conf.build.paths.projectroot,
                                  conf.build.paths.branch_staging,
                                  'release.txt')
    else:
        release_fn = os.path.join(conf.build.paths.projectroot,
                                  conf.build.paths.public, 'release.txt')


    if not os.path.exists(os.path.dirname(release_fn)):
        os.makedirs(os.path.dirname(release_fn))

    with open(release_fn, 'w') as f:
        f.write(conf.git.commit)

    puts('[build]: generated "{0}" with current release hash.'.format(release_fn))
Example #21
def create(dbuser=None, dbname=None):
    """
    Create a PostgreSQL Database and User: db.pgsql.create:dbuser,dbname

    Example: db.pgsql.create:myproject,myproject

    The password will be randomly generated.
    *  Run once.
    ** This command must be executed by a sudoer.
    """
    dbuser = dbuser or env.PROJECT.appname
    dbname = dbname or env.PROJECT.appname

    password = run('makepasswd --chars 32')
    assert(len(password) == 32)  # Ouch!

    sudo('psql template1 -c "CREATE USER %(dbuser)s WITH CREATEDB ENCRYPTED PASSWORD \'%(password)s\'"' % locals(), user='******')
    sudo('createdb "%(dbname)s" -O "%(dbuser)s"' % locals(), user='******')
    sudo('psql %(dbname)s -c "CREATE EXTENSION unaccent;"' % locals(), user='******')

    # Persist database password
    cfg = "localhost:5432:%(dbname)s:%(dbuser)s:%(password)s" % locals()
    sudo("echo '%s' > %s/.pgpass" % (cfg, env.PROJECT.share))
    sudo('chown %(user)s %(share)s/.pgpass' % env.PROJECT)
    sudo('chgrp www-data %(share)s/.pgpass' % env.PROJECT)
    sudo('chmod 600 %(share)s/.pgpass' % env.PROJECT)

    db_url = 'pgsql://%(dbuser)s:%(password)s@localhost/%(dbname)s' % locals()
    puts(yellow('DATABASE_URL => ' + db_url))
    return db_url
Example #22
def import_configuration(module_name, data_dir):
    """
    Load configuration from file as python module.
    """

    try:
        debug("Attempting to load {module_name}.py from {data_dir}",
              module_name=module_name,
              data_dir=data_dir)

        module = _import(module_name, data_dir)
        puts("Loaded {module_name}.py from {data_dir}".format(module_name=module_name,
                                                              data_dir=data_dir))
    except ImportError as e:
        # if the module was found but could not be loaded, re-raise the error
        if getattr(e, 'module_path', None):
            raise e
        debug("Attempting to load {module_name}.py_tmpl from {data_dir}",
              module_name=module_name,
              data_dir=data_dir)
        # try to load as a template
        try:
            env = Environment(loader=FileSystemLoader(data_dir))
            rendered_module = env.get_template(module_name + '.py_tmpl').render({})
            module = _import_string(module_name, rendered_module)
            puts("Loaded {module_name}.py_tmpl from {data_dir}".format(module_name=module_name,
                                                                       data_dir=data_dir))
        except TemplateNotFound:
            debug("Could not load {module_name} from {data_dir}",
                  module_name=module_name,
                  data_dir=data_dir)
            module = {}

    return options.module_as_dict(module)
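The helper _import is project-specific; a minimal sketch of loading a module by name from a directory, assuming Python 2's imp module:

import imp

def _import(module_name, data_dir):
    fobj, path, desc = imp.find_module(module_name, [data_dir])
    try:
        return imp.load_module(module_name, fobj, path, desc)
    finally:
        if fobj:
            fobj.close()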
Example #23
def create_placement_group(name=None):
    if name is None:
        abort('\n'
              '\n'
              '    You must specify a name for the placement group:\n'
              '        fab create_placement_group:NAME\n')

    ec2 = boto.ec2.connect_to_region(env.ec2_region)
    try:
        if ec2.create_placement_group(name):
            puts('\n'
                 '    "%s" placement group created successfully!' % name)
        else:
            abort('\n'
                  '\n'
                  '    "%s" placement group could not be created!' % name)
    except boto.exception.EC2ResponseError as e:
        if e.error_code == 'InvalidPlacementGroup.Duplicate':
            puts('\n'
                 '    "%s" placement group already exists' % name)
        else:
            abort('\n'
                  '\n'
                  '    "%s" placement group could not be created!: %s' %
                  (name, e.message))
Example #24
def write_configfile(remote_path, content=None, filename=None):
    _info('attempting to write {}...'.format(remote_path))

    rm_file = False
    if not filename:
        _, filename = tempfile.mkstemp()
        rm_file = True
        with open(filename, 'w') as f:
            f.write(content)

    _, old = tempfile.mkstemp()

    with hide('running', 'stdout', 'stderr'):
        if exists(remote_path):
            get(remote_path, old)
            with settings(hide('warnings'), warn_only=True):
                res = local('diff {} {}'.format(old, filename), capture=True)
            if res.failed:
                _bad('files differ')
                puts(res, show_prefix=False)
                if prompt('update file? [y/n]') == 'y':
                    _info('writing new {}...'.format(remote_path))
                    put(filename, remote_path, use_sudo=True, mode=0644)
            else:
                _good('files already match')
        else:
            _good('no remote file exists, writing now')
            put(filename, remote_path, use_sudo=True, mode=0644)

    # remove files
    os.remove(old)
    if rm_file:
        os.remove(filename)
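Call sites pass either inline content or a prepared local file (paths hypothetical):

write_configfile('/etc/nginx/conf.d/app.conf', content=rendered_config)
write_configfile('/etc/ssh/sshd_config', filename='files/sshd_config')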
Example #25
def configure(path=None, solr_path=None, user=None, solr_user=None, url_prefix=None,
              remote_proxy=None, solr_admin_url=None):
    'Configuration settings used internally for the build.'

    env.version = openemory.__version__
    config_from_git()
    # construct a unique build directory name based on software version and git revision
    env.build_dir = '%(project)s-%(version)s-%(git_rev)s' % env
    env.tarball = '%(project)s-%(version)s-%(git_rev)s.tar.bz2' % env
    env.solr_tarball = '%(project)s-solr-%(version)s%(git_rev_tag)s.tar.bz2' % env

    if path:
        env.remote_path = path.rstrip('/')
    if solr_path:
        env.remote_solr_path = solr_path.rstrip('/')
    if user:
        env.remote_acct = user
    if solr_user:
        env.solr_acct = solr_user
    if url_prefix:
        env.url_prefix = url_prefix.rstrip('/')
    if solr_admin_url:
        env.solr_admin_url = solr_admin_url

    if remote_proxy:
        env.remote_proxy = remote_proxy
        puts('Setting remote proxy to %(remote_proxy)s' % env)
Example #26
def manpage_url():
    if env.input_file is None:
        abort('[man]: you must specify input and output files.')

    project_source = 'source'

    top_level_items = set()
    for fs_obj in os.listdir(project_source):
        if fs_obj.startswith('.static') or fs_obj == 'index.txt':
            continue
        if os.path.isdir(os.path.join(project_source, fs_obj)):
            top_level_items.add(fs_obj)
        if fs_obj.endswith('.txt'):
            top_level_items.add(fs_obj[:-4])

    top_level_items = '/' + '.*|/'.join(top_level_items)
    re_string = '(\\\\fB({0}.*)\\\\fP)'.format(top_level_items)

    with open(env.input_file, 'r') as f:
        manpage = f.read()

    manpage = re.sub(re_string, "http://docs.mongodb.org/manual\\2", manpage)

    with open(env.input_file, 'w') as f:
        f.write(manpage)

    puts("[{0}]: fixed urls in {1}".format('man', env.input_file))
Example #27
def install():
    """
    Update the requirements.
    """
    puts('Installing...')
    cmd = '{virtualenv_dir}/bin/python setup.py develop'
    run_as_addok(cmd.format(**env))
Example #28
def _create_security_group_abort_on_error(name, desc, rules):
    ec2 = boto.ec2.connect_to_region(env.ec2_region)
    try:
        sg = ec2.create_security_group(name, desc, vpc_id=env.ec2_vpc_id)
    except boto.exception.EC2ResponseError as e:
        if e.error_code == 'InvalidGroup.Duplicate':
            abort('\n'
                  '\n'
                  '    "%s" security group already exists!' % name)
        else:
            abort('\n'
                  '\n'
                  '    "%s" security group could not be created!: %s' %
                  (name, e.message))

    for rule in rules:
        ec2.authorize_security_group(
            group_id=sg.id,
            ip_protocol=rule[0],
            from_port=rule[1],
            to_port=rule[2],
            cidr_ip=rule[3])

    puts('\n'
         '    "%s" security group created successfully!' % name)
Example #29
def puts(self, versions):
    for i, (version, is_tmp) in enumerate(versions):
        if is_tmp:
            s = '%s - %s (tmp)'
        else:
            s = '%s - %s'
        puts(s % (i, posixpath.basename(version)))
Example #30
def _generate_images(cmd, dpi, width, target, source):
    local(cmd.format(cmd=_get_inkscape_cmd(),
                     dpi=dpi,
                     width=width,
                     target=target,
                     source=source))
    puts('[image]: generated image file {0}'.format(source))
Example #31
def analyse_owd(test_id='',
                out_dir='',
                replot_only='0',
                source_filter='',
                min_values='3',
                omit_const='0',
                ymin='0',
                ymax='0',
                lnames='',
                stime='0.0',
                etime='0.0',
                out_name='',
                pdf_dir='',
                ts_correct='1',
                plot_params='',
                plot_script='',
                burst_sep='0.0',
                sburst='1',
                eburst='0',
                seek_window='',
                anchor_map='',
                owd_midpoint='0'):
    "Plot OWD of flows"

    # Note we allow ts_correct as a parameter for syntactic similarity to other
    # analyse_* tasks, but abort with warning if user tries explicitly to
    # make it 0 (which is unacceptable for OWD calculations)

    if ts_correct == '0':
        abort("Warning: Cannot do OWD calculations with ts_correct=0")

    (test_id_arr, out_files,
     out_groups) = _extract_owd_pktloss(test_id,
                                        out_dir,
                                        replot_only,
                                        source_filter,
                                        ts_correct,
                                        burst_sep,
                                        sburst,
                                        eburst,
                                        seek_window,
                                        log_loss='0',
                                        anchor_map=anchor_map,
                                        owd_midpoint=owd_midpoint)

    (out_files, out_groups) = filter_min_values(out_files, out_groups,
                                                min_values)
    out_name = get_out_name(test_id_arr, out_name)

    burst_sep = float(burst_sep)
    if burst_sep == 0.0:
        plot_time_series(out_name,
                         out_files,
                         'OWD (ms)',
                         2,
                         1000.0,
                         'pdf',
                         out_name + '_owd',
                         pdf_dir=pdf_dir,
                         omit_const=omit_const,
                         ymin=float(ymin),
                         ymax=float(ymax),
                         lnames=lnames,
                         stime=stime,
                         etime=etime,
                         groups=out_groups,
                         plot_params=plot_params,
                         plot_script=plot_script,
                         source_filter=source_filter)
    else:
        # Each trial has multiple files containing data from separate bursts detected within the trial
        plot_incast_ACK_series(out_name,
                               out_files,
                               'OWD (ms)',
                               2,
                               1000.0,
                               'pdf',
                               out_name + '_owd',
                               pdf_dir=pdf_dir,
                               aggr='',
                               omit_const=omit_const,
                               ymin=float(ymin),
                               ymax=float(ymax),
                               lnames=lnames,
                               stime=stime,
                               etime=etime,
                               groups=out_groups,
                               burst_sep=burst_sep,
                               sburst=int(sburst),
                               plot_params=plot_params,
                               plot_script=plot_script,
                               source_filter=source_filter)

    # done
    puts('\n[MAIN] COMPLETED plotting OWDs %s \n' % out_name)
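Like the other analyse_* tasks, this is invoked via fab with colon/comma argument syntax (test ID illustrative):

fab analyse_owd:test_id=20150204-100000_experiment,out_dir=./results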
Example #32
def test_configuration(verbose=True):
    errors = []
    parameters_info = []
    if 'project' not in env or not env.project:
        errors.append('Project name missing')
    elif verbose:
        parameters_info.append(('Project name', env.project))
    if 'repository' not in env or not env.repository:
        errors.append('Repository url missing')
    elif verbose:
        parameters_info.append(('Repository url', env.repository))
    if 'hosts' not in env or not env.hosts:
        errors.append('Hosts configuration missing')
    elif verbose:
        parameters_info.append(('Hosts', env.hosts))
    if 'django_user' not in env or not env.django_user:
        errors.append('Django user missing')
    elif verbose:
        parameters_info.append(('Django user', env.django_user))
    if 'django_user_group' not in env or not env.django_user_group:
        errors.append('Django user group missing')
    elif verbose:
        parameters_info.append(('Django user group', env.django_user_group))
    if 'django_user_home' not in env or not env.django_user_home:
        errors.append('Django user home dir missing')
    elif verbose:
        parameters_info.append(('Django user home dir', env.django_user_home))
    if 'projects_path' not in env or not env.projects_path:
        errors.append('Projects path configuration missing')
    elif verbose:
        parameters_info.append(('Projects path', env.projects_path))
    if 'code_root' not in env or not env.code_root:
        errors.append('Code root configuration missing')
    elif verbose:
        parameters_info.append(('Code root', env.code_root))
    if 'django_project_root' not in env or not env.django_project_root:
        errors.append('Django project root configuration missing')
    elif verbose:
        parameters_info.append(
            ('Django project root', env.django_project_root))
    if 'django_project_settings' not in env or not env.django_project_settings:
        env.django_project_settings = 'settings'
    if verbose:
        parameters_info.append(
            ('django_project_settings', env.django_project_settings))
    if 'django_media_path' not in env or not env.django_media_path:
        errors.append('Django media path configuration missing')
    elif verbose:
        parameters_info.append(('Django media path', env.django_media_path))
    if 'django_static_path' not in env or not env.django_static_path:
        errors.append('Django static path configuration missing')
    elif verbose:
        parameters_info.append(('Django static path', env.django_static_path))
    if 'south_used' not in env:
        errors.append('"south_used" configuration missing')
    elif verbose:
        parameters_info.append(('south_used', env.south_used))
    if 'virtenv' not in env or not env.virtenv:
        errors.append('virtenv configuration missing')
    elif verbose:
        parameters_info.append(('virtenv', env.virtenv))
    if 'virtenv_options' not in env or not env.virtenv_options:
        errors.append(
            '"virtenv_options" configuration missing, you must have at least one option'
        )
    elif verbose:
        parameters_info.append(('virtenv_options', env.virtenv_options))
    if 'requirements_file' not in env or not env.requirements_file:
        env.requirements_file = join(env.code_root, 'requirements.txt')
    if verbose:
        parameters_info.append(('requirements_file', env.requirements_file))
    if 'ask_confirmation' not in env:
        errors.append('"ask_confirmation" configuration missing')
    elif verbose:
        parameters_info.append(('ask_confirmation', env.ask_confirmation))
    if 'gunicorn_bind' not in env or not env.gunicorn_bind:
        errors.append('"gunicorn_bind" configuration missing')
    elif verbose:
        parameters_info.append(('gunicorn_bind', env.gunicorn_bind))
    if 'gunicorn_logfile' not in env or not env.gunicorn_logfile:
        errors.append('"gunicorn_logfile" configuration missing')
    elif verbose:
        parameters_info.append(('gunicorn_logfile', env.gunicorn_logfile))
    if 'rungunicorn_script' not in env or not env.rungunicorn_script:
        errors.append('"rungunicorn_script" configuration missing')
    elif verbose:
        parameters_info.append(('rungunicorn_script', env.rungunicorn_script))
    if 'gunicorn_workers' not in env or not env.gunicorn_workers:
        errors.append(
            '"gunicorn_workers" configuration missing, you must have at least one worker'
        )
    elif verbose:
        parameters_info.append(('gunicorn_workers', env.gunicorn_workers))
    if 'gunicorn_worker_class' not in env or not env.gunicorn_worker_class:
        errors.append('"gunicorn_worker_class" configuration missing')
    elif verbose:
        parameters_info.append(
            ('gunicorn_worker_class', env.gunicorn_worker_class))
    if 'gunicorn_loglevel' not in env or not env.gunicorn_loglevel:
        errors.append('"gunicorn_loglevel" configuration missing')
    elif verbose:
        parameters_info.append(('gunicorn_loglevel', env.gunicorn_loglevel))
    if 'nginx_server_name' not in env or not env.nginx_server_name:
        errors.append('"nginx_server_name" configuration missing')
    elif verbose:
        parameters_info.append(('nginx_server_name', env.nginx_server_name))
    if 'nginx_conf_file' not in env or not env.nginx_conf_file:
        errors.append('"nginx_conf_file" configuration missing')
    elif verbose:
        parameters_info.append(('nginx_conf_file', env.nginx_conf_file))
    if 'nginx_client_max_body_size' not in env or not env.nginx_client_max_body_size:
        env.nginx_client_max_body_size = 10
    elif not isinstance(env.nginx_client_max_body_size, int):
        errors.append('"nginx_client_max_body_size" must be an integer value')
    if verbose:
        parameters_info.append(
            ('nginx_client_max_body_size', env.nginx_client_max_body_size))
    if 'nginx_htdocs' not in env or not env.nginx_htdocs:
        errors.append('"nginx_htdocs" configuration missing')
    elif verbose:
        parameters_info.append(('nginx_htdocs', env.nginx_htdocs))

    if 'nginx_https' not in env:
        env.nginx_https = False
    elif not isinstance(env.nginx_https, bool):
        errors.append('"nginx_https" must be a boolean value')
    elif verbose:
        parameters_info.append(('nginx_https', env.nginx_https))

    if 'supervisor_program_name' not in env or not env.supervisor_program_name:
        env.supervisor_program_name = env.project
    if verbose:
        parameters_info.append(
            ('supervisor_program_name', env.supervisor_program_name))
    if 'supervisorctl' not in env or not env.supervisorctl:
        errors.append('"supervisorctl" configuration missing')
    elif verbose:
        parameters_info.append(('supervisorctl', env.supervisorctl))
    if 'supervisor_autostart' not in env or not env.supervisor_autostart:
        errors.append('"supervisor_autostart" configuration missing')
    elif verbose:
        parameters_info.append(
            ('supervisor_autostart', env.supervisor_autostart))
    if 'supervisor_autorestart' not in env or not env.supervisor_autorestart:
        errors.append('"supervisor_autorestart" configuration missing')
    elif verbose:
        parameters_info.append(
            ('supervisor_autorestart', env.supervisor_autorestart))
    if 'supervisor_redirect_stderr' not in env or not env.supervisor_redirect_stderr:
        errors.append('"supervisor_redirect_stderr" configuration missing')
    elif verbose:
        parameters_info.append(
            ('supervisor_redirect_stderr', env.supervisor_redirect_stderr))
    if 'supervisor_stdout_logfile' not in env or not env.supervisor_stdout_logfile:
        errors.append('"supervisor_stdout_logfile" configuration missing')
    elif verbose:
        parameters_info.append(
            ('supervisor_stdout_logfile', env.supervisor_stdout_logfile))
    if 'supervisord_conf_file' not in env or not env.supervisord_conf_file:
        errors.append('"supervisord_conf_file" configuration missing')
    elif verbose:
        parameters_info.append(
            ('supervisord_conf_file', env.supervisord_conf_file))

    if errors:
        if len(errors) == 29:
            # all 29 checked settings are missing
            puts(
                'Configuration missing! Please read README.rst first or go ahead at your own risk.'
            )
        else:
            puts('Configuration test revealed %i errors:' % len(errors))
            puts('%s\n\n* %s\n' % ('-' * 37, '\n* '.join(errors)))
            puts('-' * 40)
            puts('Please fix them or go ahead at your own risk.')
        return False
    elif verbose:
        for parameter in parameters_info:
            parameter_formatting = "'%s'" if isinstance(parameter[1],
                                                        str) else "%s"
            parameter_value = parameter_formatting % parameter[1]
            puts('%s %s' % (parameter[0].ljust(27), green(parameter_value)))
    puts('Configuration tests passed!')
    return True
Example #33
def run_experiment(test_id='', test_id_pfx='', *args, **kwargs):

    do_init_os = kwargs.get('do_init_os', '1')
    ecn = kwargs.get('ecn', '0')
    tcp_cc_algo = kwargs.get('tcp_cc_algo', 'default')
    duration = kwargs.get('duration', '')
    if duration == '':
        abort('No experiment duration specified')

    # create sub directory for test id prefix
    mkdir_p(test_id_pfx)

    # remove <test_id>* files in <test_id_pfx> directory if exists
    file_pattern = test_id_pfx + "/" + test_id + "_*"

    for f in glob.glob(file_pattern):
        os.remove(f)

    # log experiment in started list
    local('echo "%s" >> experiments_started.txt' % test_id)

    puts('\n[MAIN] Starting experiment %s \n' % test_id)

    tftpboot_dir = ''
    try:
        tftpboot_dir = config.TPCONF_tftpboot_dir
    except AttributeError:
        pass

    # initialise
    if tftpboot_dir != '' and do_init_os == '1':
        execute(get_host_info,
                netint='0',
                hosts=config.TPCONF_router + config.TPCONF_hosts)
        execute(init_os_hosts, file_prefix=test_id_pfx,
                local_dir=test_id_pfx)  # reboot
        clear_type_cache()  # clear host type cache
        disconnect_all()  # close all connections
        time.sleep(30)  # give hosts some time to settle down (after reboot)

    # initialise topology
    try:
        switch = ''
        port_prefix = ''
        port_offset = 0
        try:
            switch = config.TPCONF_topology_switch
            port_prefix = config.TPCONF_topology_switch_port_prefix
            port_offset = config.TPCONF_topology_switch_port_offset
        except AttributeError:
            pass

        if config.TPCONF_config_topology == '1' and do_init_os == '1':
            # We cannot call init_topology directly, as it is decorated with
            # runs_once. In experiment.py we have an empty host list, whereas
            # when init_topology is run from the command line we have the -H
            # host list. Executing a runs_once task with an empty host list
            # (hosts are set in the execute call) runs it only for the first
            # host, which is not what we want. In contrast, with a host list
            # in context, execute runs the task once for each host (hence we
            # need runs_once when called from the command line).

            # sequentially configure switch
            execute(init_topology_switch,
                    switch,
                    port_prefix,
                    port_offset,
                    hosts=config.TPCONF_hosts)
            # configure hosts in parallel
            execute(init_topology_host, hosts=config.TPCONF_hosts)

    except AttributeError:
        pass

    file_cleanup(test_id_pfx)  # remove any .start files
    execute(get_host_info,
            netmac='0',
            hosts=config.TPCONF_router + config.TPCONF_hosts)
    execute(sanity_checks)
    execute(init_hosts, *args, **kwargs)

    # first is the legacy case with single router and single queue definitions
    # second is the multiple router case with several routers and several queue
    # definitions
    if isinstance(config.TPCONF_router_queues, list):
        # start queues/pipes
        config_router_queues(config.TPCONF_router_queues, config.TPCONF_router,
                             **kwargs)
        # show pipe setup
        execute(show_pipes, hosts=config.TPCONF_router)
    elif isinstance(config.TPCONF_router_queues, dict):
        for router in config.TPCONF_router_queues.keys():
            # start queues/pipes for router r
            config_router_queues(config.TPCONF_router_queues[router], [router],
                                 **kwargs)
            # show pipe setup
            execute(show_pipes, hosts=[router])

    # log config parameters
    execute(log_config_params,
            file_prefix=test_id,
            local_dir=test_id_pfx,
            hosts=['MAIN'],
            *args,
            **kwargs)
    # log host tcp settings
    execute(log_host_tcp,
            file_prefix=test_id,
            local_dir=test_id_pfx,
            hosts=['MAIN'],
            *args,
            **kwargs)

    # start all loggers
    execute(start_loggers,
            file_prefix=test_id,
            local_dir=test_id_pfx,
            remote_dir=config.TPCONF_remote_dir)

    # Start broadcast ping and loggers (if enabled)
    try:
        if config.TPCONF_bc_ping_enable == '1':
            # for multicast need IP of outgoing interface
            # which is router's control interface
            use_multicast = socket.gethostbyname(
                config.TPCONF_router[0].split(':')[0])

            # get configured broadcast or multicast address
            bc_addr = ''
            try:
                bc_addr = config.TPCONF_bc_ping_address
            except AttributeError:
                # use default multicast address
                bc_addr = '224.0.1.199'

            execute(start_bc_ping_loggers,
                    file_prefix=test_id,
                    local_dir=test_id_pfx,
                    remote_dir=config.TPCONF_remote_dir,
                    bc_addr=bc_addr)

            try:
                bc_ping_rate = config.TPCONF_bc_ping_rate
            except AttributeError:
                bc_ping_rate = '1'

            # start the broadcast ping on the first router
            execute(start_bc_ping,
                    file_prefix=test_id,
                    local_dir=test_id_pfx,
                    remote_dir=config.TPCONF_remote_dir,
                    bc_addr=bc_addr,
                    rate=bc_ping_rate,
                    use_multicast=use_multicast,
                    hosts=[config.TPCONF_router[0]])
    except AttributeError:
        pass

    # start traffic generators
    sync_delay = 5.0
    start_time = datetime.datetime.now()
    total_duration = float(duration) + sync_delay
    for t, c, v in sorted(config.TPCONF_traffic_gens, cmp=_cmp_timekeys):

        try:
            # delay everything to have synchronised start
            next_time = float(t) + sync_delay
        except ValueError:
            abort('Traffic generator entry key time must be a float')

        # add the kwargs parameter to the call of _param
        v = re.sub("(V_[a-zA-Z0-9_-]*)", "_param('\\1', kwargs)", v)

        # trim white space at both ends
        v = v.strip()

        if v[-1] != ',':
            v = v + ','
        # add counter parameter
        v += ' counter="%s"' % c
        # add file prefix parameter
        v += ', file_prefix=test_id'
        # add remote dir
        v += ', remote_dir=\'%s\'' % config.TPCONF_remote_dir
        # add test id prefix to put files into correct directory
        v += ', local_dir=\'%s\'' % test_id_pfx
        # we don't need to check for presence of tools inside start functions
        v += ', check="0"'

        # set wait time until process is started
        now = datetime.datetime.now()
        dt_diff = now - start_time
        sec_diff = (dt_diff.days * 24 * 3600 + dt_diff.seconds) + \
            (dt_diff.microseconds / 1000000.0)
        if next_time - sec_diff > 0:
            wait = str(next_time - sec_diff)
        else:
            wait = '0.0'
        v += ', wait="' + wait + '"'

        _nargs, _kwargs = eval('_args(%s)' % v)

        # get traffic generator duration
        try:
            traffic_duration = _kwargs['duration']
        except KeyError:
            traffic_duration = 0
        # find the largest total_duration possible
        if next_time + traffic_duration > total_duration:
            total_duration = next_time + traffic_duration

        execute(*_nargs, **_kwargs)

    # print process list
    print_proc_list()

    # wait until finished (add additional 5 seconds to be sure)
    total_duration = float(total_duration) + 5.0
    puts('\n[MAIN] Running experiment for %i seconds\n' % int(total_duration))
    time.sleep(total_duration)

    # shut everything down and get log data
    execute(stop_processes, local_dir=test_id_pfx)
    execute(log_queue_stats,
            file_prefix=test_id,
            local_dir=test_id_pfx,
            hosts=config.TPCONF_router)

    # log test id in completed list
    local('echo "%s" >> experiments_completed.txt' % test_id)

    # kill any remaining processes
    execute(kill_old_processes,
            hosts=config.TPCONF_router + config.TPCONF_hosts)

    # done
    puts('\n[MAIN] COMPLETED experiment %s \n' % test_id)
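The traffic generator entries consumed by the start loop above are (time, counter, call-spec) tuples whose V_ variables are substituted from kwargs; an illustrative config entry (hosts and tool assumed):

TPCONF_traffic_gens = [
    ('0.0', '1', " start_iperf, client='testhost2', server='testhost1', port=5000, duration=V_duration "),
]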
Example #34
def release(branch, release_type):
    """Release a new version.

    :branch,release_type

    branch to be released
    release_type: see fab -d compute_version

    Preflight, runs tests, bumps version number, tags repo and uploads to pypi.
    """
    _invirt()
    with settings(hide('stderr', 'stdout', 'running')):
        # Preflight checks.
        version, changes = _sync_and_preflight_check(branch, release_type)
        puts(c.blue("Testing..."))
        # Lets check out this branch and test it.
        local("git checkout %s" % branch,  capture=True)
        test()
        puts(c.green("Tests passed!"))
        puts(c.blue("Build, package and publish..."))
        # Commit to the version file.
        local('echo "%s" > VERSION' % version)
        # Build
        local("python setup.py register sdist bdist_egg upload")
        puts(c.green("Uploaded to PyPI!"))
        # Commit the version change and tag the release.
        puts(c.blue("Commit, tag, merge, prune and push."))
        local('git commit -m"Bumped version to v%s" -a' % version)
        local('git tag -a "v%s" -m "Release version %s"' % (version, version))
        # Merge the branch into master and push them both to origin
        # Conflicts should never occur, due to preflight checks.
        local('git checkout master', capture=True)
        local('git merge %s' % branch, capture=True)
        local('git branch -d %s' % branch)
        local('git push origin :%s' % branch)  # This deletes remote branch.
        local('git push --tags origin master')
        puts(c.magenta("Released branch %s as v%s!" % (branch, version)))
        post_release_install_verification()
Example #35
def build_api_docs():
    """Build the HTML API docs."""
    puts(c.magenta("Building HTML API docs..."))
    with settings(hide('running', 'stdout', 'stderr')):
        with lcd('docs'):
            local('make html')
Example #36
def devdeps():
    """Install the development dependencies.."""
    _invirt()
    puts(c.magenta("Installing dev dependencies..."))
    with settings(hide('stdout')):
        local('pip install -r dev-req.txt')
Example #37
def analyse_pktloss(test_id='',
                    out_dir='',
                    replot_only='0',
                    source_filter='',
                    min_values='3',
                    omit_const='0',
                    ymin='0',
                    ymax='0',
                    lnames='',
                    stime='0.0',
                    etime='0.0',
                    out_name='',
                    pdf_dir='',
                    ts_correct='1',
                    plot_params='',
                    plot_script='',
                    burst_sep='0.0',
                    sburst='1',
                    eburst='0',
                    seek_window='',
                    log_loss='2'):
    "Plot per-flow packet loss events vs time (or cumlative over time)"

    if log_loss != '1' and log_loss != '2':
        abort(
            "Must set log_loss=1 (pkt loss events) or log_loss=2 (cumulative pkt loss)"
        )

    (test_id_arr, out_files,
     out_groups) = _extract_owd_pktloss(test_id, out_dir, replot_only,
                                        source_filter, ts_correct, burst_sep,
                                        sburst, eburst, seek_window, log_loss)

    (out_files, out_groups) = filter_min_values(out_files, out_groups,
                                                min_values)
    out_name = get_out_name(test_id_arr, out_name)

    burst_sep = float(burst_sep)
    if burst_sep == 0.0:
        plot_time_series(out_name,
                         out_files,
                         'Lost packets',
                         2,
                         1,
                         'pdf',
                         out_name + '_loss2',
                         pdf_dir=pdf_dir,
                         omit_const=omit_const,
                         ymin=float(ymin),
                         ymax=float(ymax),
                         lnames=lnames,
                         stime=stime,
                         etime=etime,
                         groups=out_groups,
                         plot_params=plot_params,
                         plot_script=plot_script,
                         source_filter=source_filter)
    else:
        # Each trial has multiple files containing data from separate bursts detected within the trial
        plot_incast_ACK_series(out_name,
                               out_files,
                               'Lost packets',
                               2,
                               1,
                               'pdf',
                               out_name + '_loss2',
                               pdf_dir=pdf_dir,
                               aggr='',
                               omit_const=omit_const,
                               ymin=float(ymin),
                               ymax=float(ymax),
                               lnames=lnames,
                               stime=stime,
                               etime=etime,
                               groups=out_groups,
                               burst_sep=burst_sep,
                               sburst=int(sburst),
                               plot_params=plot_params,
                               plot_script=plot_script,
                               source_filter=source_filter)

    # done
    puts('\n[MAIN] COMPLETED plotting pktloss %s \n' % out_name)
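A hedged usage sketch: with Fabric's task:arg=value syntax, plotting cumulative loss for a single flow could be invoked as follows (the test ID and flow tuple are illustrative, not real data):

fab analyse_pktloss:test_id=exp_20131206-102931_tcp,source_filter=S_172.16.10.2_5000,log_loss=2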
Exemplo n.º 38
def start(config):
    fab.puts("Starting Cassandra..")
    cmd = 'JAVA_HOME={java_home} nohup ~/fab/cassandra/bin/cassandra'.format(
        java_home=config['java_home'])
    fab.run(cmd)
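The task expects a config mapping with a 'java_home' entry and a Cassandra tree unpacked under ~/fab/cassandra on the remote host. A minimal sketch of a call (the JVM path is an assumption):

start({'java_home': '/usr/lib/jvm/java-8-openjdk-amd64'})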
Exemplo n.º 39
def clean():
    puts("* Cleaning Repo")
    directories = ['.tox', 'SoftLayer.egg-info', 'build', 'dist']
    for directory in directories:
        if os.path.exists(directory) and os.path.isdir(directory):
            shutil.rmtree(directory)
Exemplo n.º 40
def venv_create():
    """
    Create a virtualenv environment.

    The virtualenv is created in ``env.CFG_INVENIO_PREFIX``. The task will
    also create ``lib/python/invenio/`` and symlink it into the virtualenv's
    site-packages, as well as ``var/tmp/ooffice-tmp-files`` (via sudo). If
    ``env.WITH_DEVSCRIPTS`` is ``True``, invenio-devscripts will be installed.
    If ``env.WITH_WORKDIR`` is ``True``, git-new-workdir will be installed.

    Lastly, it will render the template ``activate-profile.tpl`` and append
    it to ``bin/activate``. The appended script sets up common environment
    variables that e.g. invenio-devscripts depend on.

    If an environment already exists, the user will be asked for confirmation
    to remove the directory (using sudo, due to the directory
    ``var/tmp/ooffice-tmp-files`` which is created using sudo).
    """
    # Checks
    if 'CFG_INVENIO_PREFIX' not in env:
        abort(red("CFG_INVENIO_PREFIX is not specified in env.") % env)

    puts(cyan(">>> Creating virtualenv in %(CFG_INVENIO_PREFIX)s..." % env))

    # Remove previous installation
    if os.path.exists(env.CFG_INVENIO_PREFIX):
        res = confirm("Remove installation in %(CFG_INVENIO_PREFIX)s ?" % env)
        if not res:
            abort(red("Cannot continue") % env)
        else:
            local("sudo rm -Rf %(CFG_INVENIO_PREFIX)s" % env)

    # Create virtual environment
    dirname = os.path.dirname(env.CFG_INVENIO_PREFIX)
    basename = os.path.basename(env.CFG_INVENIO_PREFIX)

    local("mkdir -p %s" % dirname)
    local("cd %s && virtualenv -p %s %s" % (dirname, env.PYTHON, basename))

    # Create needed symbolic links
    pyver = python_version()
    local("mkdir -p %(CFG_INVENIO_PREFIX)s/lib/python/invenio" % env)
    local(("mkdir -p %(CFG_INVENIO_PREFIX)s/lib/python" + pyver +
           "/site-packages") % env)
    local((
        "ln -s %(CFG_INVENIO_PREFIX)s/lib/python/invenio %(CFG_INVENIO_PREFIX)s/lib/python"
        + pyver + "/site-packages/invenio") % env)

    # Write extras into the activate script
    write_template(os.path.join(env.CFG_INVENIO_PREFIX, 'bin/activate'),
                   env,
                   tpl_file='activate-profile.tpl',
                   append=True,
                   mark="ACTIVATE_PROFILE")

    # Install devscripts
    if env.WITH_DEVSCRIPTS:
        puts(">>> Installing invenio-devscripts...")
        local(
            "cd %(CFG_INVENIO_PREFIX)s && git clone https://github.com/tiborsimko/invenio-devscripts.git"
            % env)
        local("cd %(CFG_INVENIO_PREFIX)s && mv invenio-devscripts/* bin/" %
              env)

    if env.WITH_WORKDIR:
        puts(">>> Installing git-new-workdir...")
        local(
            'wget -O %(CFG_INVENIO_PREFIX)s/bin/git-new-workdir "http://repo.or.cz/w/git.git/blob_plain/HEAD:/contrib/workdir/git-new-workdir"'
            % env)
        local("chmod +x %(CFG_INVENIO_PREFIX)s/bin/git-new-workdir" % env)

    # OpenOffice temporary directory
    local("mkdir -p %(CFG_INVENIO_PREFIX)s/var/tmp/ooffice-tmp-files" % env)
    local(
        "sudo chown -R nobody %(CFG_INVENIO_PREFIX)s/var/tmp/ooffice-tmp-files"
        % env)
    local(
        "sudo chmod -R 755 %(CFG_INVENIO_PREFIX)s/var/tmp/ooffice-tmp-files" %
        env)
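venv_create pulls all of its settings from Fabric's env dictionary. A minimal sketch of the values it reads (the paths and flags below are assumptions, not the project's defaults):

env.CFG_INVENIO_PREFIX = '/opt/invenio'
env.PYTHON = 'python2.7'
env.WITH_DEVSCRIPTS = True
env.WITH_WORKDIR = False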
Exemplo n.º 41
def install_package(package, platform):

    install = prompt("\n\nDo you want to install " + package + "? (yes/no)").strip() or 'no'

    if install[0].lower() == "y" and platform == "ubuntu":
        puts(green('\nTrying to install ' + package))
        local('sudo apt-get install -y ' + package)
        puts(green('\nDone installing ' + package))

    elif install[0].lower() == "y" and (platform == "centos"
                                        or platform == "redhat"):
        puts(green('\nTrying to install ' + package))
        local('sudo yum install -y ' + package)
        puts(green('\nDone installing ' + package))

    elif install[0].lower() == "n" and platform == "ubuntu":
        puts(
            red('Please, manually install this package before running this script again: sudo apt-get install '
                + package))
        exit()
    elif install[0].lower() == "n" and (platform == "centos"
                                        or platform == "redhat"):
        puts(
            red('Please, manually install this package before running this script again: sudo yum install '
                + package))
        exit()
    else:
        puts(
            red('Please, manually install required package before running this script again '
                + package))
        exit()
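A usage sketch (the package name is illustrative): combined with the check_platform() task shown in a later example, the call prompts for confirmation and shells out to the platform's package manager.

install_package('curl', check_platform())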
Exemplo n.º 42
def execute(category, release, include=None, exclude=None):
    """
    Run all steps in the specified stage category for the specified release

    :param category: The category of stages to run (before or after)
    :param release: The name of the release on the host
    :param include: An iterable of names of the steps to be run; all other
                    steps are skipped
    :param exclude: An iterable of names of the steps to be skipped; all
                    other steps are run
    :return: None
    """
    if include is not None and exclude is not None:
        return fab.abort("You cannot supply include and exclude values")

    if 'stages' not in fab.env.config:
        return fab.warn("No stages defined in your config")

    if category not in fab.env.config['stages']:
        return fab.warn("No stages defined in the '%s' category" % category)

    for step in fab.env.config['stages'][category]:
        if 'id' not in step:
            fab.warn("No 'id' defined for step: %s" % step)
            continue

        if include is not None:
            if step['id'] not in include:
                fab.puts("Step '%s' is not in our include list, skipping..." %
                         step['id'])
                continue

        if exclude is not None:
            if step['id'] in exclude:
                fab.puts("Step '%s' is in our exclude list, skipping..." %
                         step['id'])
                continue

        if 'commands' not in step:
            fab.warn("No 'commands' defined for step: %s" % step)
            continue

        if not isinstance(step['commands'], (list, tuple)):
            fab.warn("The supplied commands are no in the correct format: %s" %
                     step['commands'])
            continue

        roles = step.get('roles', ['all'])

        fab.puts("\nRunning step: %s (roles: %s)" % (step['id'],
                                                     ", ".join(roles)))

        prefix = variable_template(step.get('prefix'))
        cd = variable_template(step.get('cd'))

        if 'shell_env' in step:
            shell_env = {}
            for env_name in step['shell_env']:
                shell_env[env_name] = variable_template(
                    step['shell_env'][env_name]
                )
        else:
            shell_env = None

        commands = [
            variable_template(command)
            for command in step['commands']
        ]

        fab.execute(run, commands, prefix, cd, shell_env,
                    roles=roles)
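The shape of fab.env.config is not shown in this snippet; a hedged sketch of a config this task would accept (the step id, path, variable placeholders and release name are illustrative):

fab.env.config = {
    'stages': {
        'before': [{
            'id': 'migrate',                     # required: step identifier
            'roles': ['web'],                    # optional, defaults to ['all']
            'cd': '/srv/app/current',            # optional working directory
            'shell_env': {'APP_ENV': 'production'},
            'commands': ['python manage.py migrate'],  # required: list of commands
        }],
        'after': [],
    },
}
execute('before', 'v1.2.3')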
Exemplo n.º 43
def setup_virtual_env():
    """
    Create the virtualenv.
    """
    puts(green("Setting up Virtual Environment"))
    local('cd %s; virtualenv venv --no-site-packages' % OTB_DIR)
Exemplo n.º 44
def check_platform():
    """
    Check which platform we are running on.
    """
    puts(green("Checking your platform..."))
    return platform.dist()[0].lower()
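Note that platform.dist() was long deprecated and removed in Python 3.8. On modern interpreters the third-party distro package provides the same information; a sketch, not part of the original fabfile:

import distro

def check_platform():
    puts(green("Checking your platform..."))
    # distro.id() already returns a lowercase ID, e.g. 'ubuntu', 'centos'
    return distro.id()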
Exemplo n.º 45
def do(self):
    authorized_files = self.get_authorized_files(
        exclude_users=self.conf.exclude_users)
    for user, authorized_file in authorized_files:
        puts(authorized_file)
Exemplo n.º 46
def all_deps():
    '''Locally install all dependencies.'''
    puts(green("Installing all dependencies"))
    activate_venv('pip install -r %srequirements.txt' % (OTB_DIR))
Exemplo n.º 47
def analyse_cmpexp(
        exp_list='experiments_completed.txt',
        res_dir='',
        out_dir='',
        source_filter='',
        min_values='3',
        omit_const='0',
        metric='throughput',
        ptype='box',
        variables='',
        out_name='',
        ymin='0',
        ymax='0',
        lnames='',
        group_by_prefix='0',
        omit_const_xlab_vars='0',
        replot_only='0',
        pdf_dir='',
        stime='0.0',
        etime='0.0',
        ts_correct='1',
        smoothed='1',
        link_len='0',
        plot_params='',
        plot_script='',
        stat_index='',
        dupacks='0',
        cum_ackseq='1',
        merge_data='0',
        sburst='1',
        #eburst='0', test_id_prefix='[0-9]{8}\-[0-9]{6}_experiment_',
        eburst='0',
        test_id_prefix='exp_[0-9]{8}\-[0-9]{6}_',
        slowest_only='0',
        res_time_mode='0',
        query_host=''):
    "Compare metrics for different experiments"

    if ptype != 'box' and ptype != 'mean' and ptype != 'median':
        abort('ptype must be either box, mean or median')

    check = get_metric_params(metric, smoothed, ts_correct)
    if check is None:
        abort('Unknown metric %s specified' % metric)

    if source_filter == '':
        abort('Must specify at least one source filter')

    if len(source_filter.split(';')) > 12:
        abort('Cannot have more than 12 filters')

    # prevent wrong use of res_time_mode
    if metric != 'restime' and res_time_mode != '0':
        res_time_mode = '0'
    if ptype == 'box' and res_time_mode == '2':
        res_time_mode = '0'

    # XXX more param checking

    # Initialise source filter data structure
    sfil = SourceFilter(source_filter)

    # read test ids
    experiments = read_experiment_ids(exp_list)

    # get path based on first experiment id
    dir_name = get_first_experiment_path(experiments)

    # if we haven't got the extracted data, run the extract method(s) first
    if res_dir == '':
        for experiment in experiments:

            (ex_function,
             kwargs) = get_extract_function(metric,
                                            link_len,
                                            stat_index,
                                            sburst=sburst,
                                            eburst=eburst,
                                            slowest_only=slowest_only,
                                            query_host=query_host)

            (dummy, out_files,
             out_groups) = ex_function(test_id=experiment,
                                       out_dir=out_dir,
                                       source_filter=source_filter,
                                       replot_only=replot_only,
                                       ts_correct=ts_correct,
                                       **kwargs)

        if out_dir == '' or out_dir[0] != '/':
            res_dir = dir_name + '/' + out_dir
        else:
            res_dir = out_dir
    else:
        if res_dir[0] != '/':
            res_dir = dir_name + '/' + res_dir

    # make sure we have trailing slash
    res_dir = valid_dir(res_dir)

    if pdf_dir == '':
        pdf_dir = res_dir
    else:
        if pdf_dir[0] != '/':
            pdf_dir = dir_name + '/' + pdf_dir
        pdf_dir = valid_dir(pdf_dir)
        # if pdf_dir specified create if it doesn't exist
        mkdir_p(pdf_dir)

    #
    # build match string from variables
    #

    (match_str, match_str2) = build_match_strings(experiments[0], variables,
                                                  test_id_prefix)

    #
    # filter out the experiments to plot, generate x-axis labels, get test id prefix
    #

    (fil_experiments, test_id_pfx,
     xlabs) = filter_experiments(experiments, match_str, match_str2)

    #
    # get out data files based on filtered experiment list and source_filter
    #

    (ext, ylab, yindex, yscaler, sep, aggr,
     diff) = get_metric_params(metric, smoothed, ts_correct, stat_index,
                               dupacks, cum_ackseq, slowest_only)

    if res_time_mode == '1':
        plot_params += ' NOMINAL_RES_TIME="1"'
    if res_time_mode == '2':
        if ptype == 'median':
            ylab = 'Median resp time / nominal resp time'
        elif ptype == 'mean':
            ylab = 'Mean resp time / nominal resp time'
        plot_params += ' RATIO_RES_TIME="1"'

    leg_names = source_filter.split(';')

    # if we merge responders make sure we only use the merged files
    if merge_data == '1':
        # set label to indicate merged data
        leg_names = ['Merged data']
        # reset source filter so we match the merged file
        sfil.clear()
        source_filter = 'S_0.0.0.0_0'
        sfil = SourceFilter(source_filter)

    file_names = []
    for experiment in fil_experiments:
        out_files = {}
        _ext = ext

        files = get_testid_file_list('', experiment, '%s' % _ext,
                                     'LC_ALL=C sort', res_dir)
        if merge_data == '1':
            # change extension
            _ext += '.all'
            files = merge_data_files(files)

        #print(files)
        match_str = '.*_([0-9\.]*_[0-9]*_[0-9\.]*_[0-9]*)[0-9a-z_.]*' + _ext
        for f in files:
            # print(f)
            res = re.search(match_str, f)
            #print(res.group(1))
            if res and sfil.is_in(res.group(1)):
                # only add file if enough data points
                rows = int(
                    local('wc -l %s | awk \'{ print $1 }\'' % f, capture=True))
                if rows > int(min_values):
                    out_files[res.group(1)] = f

        #print(out_files)
        #print(leg_names)
        if len(out_files) < len(leg_names):
            abort(
                'No data files for some of the source filters for experiment %s'
                % experiment)

        sorted_files = sort_by_flowkeys(out_files, source_filter)

        for name, file_name in sorted_files:
            file_names.append(file_name)

    if group_by_prefix == '1':
        # group by test prefix (and flow)

        # first, get all test id prefixes
        test_id_pfxs = {}
        for experiment in fil_experiments:
            res = re.search(match_str2, experiment)
            if res:
                test_id_pfxs[res.group(1)] = 1

        # second, sort files so that same parameter combinations for different
        # prefixes are together
        # if we have multiple prefixes, create legend entry for each
        # prefix+flow combination
        _file_names = [''] * len(file_names)
        _leg_names = []
        pfx_cnt = len(test_id_pfxs)
        i = 0
        j = -1
        last_pfx = ''
        curr_pfx = ''  # guard in case no prefix matches below
        for name in file_names:
            for p in test_id_pfxs:
                if name.find(p) > -1:
                    curr_pfx = p
                    break

            if curr_pfx != last_pfx:
                i = 0
                j += 1
                for l in leg_names:
                    _leg_names.append(curr_pfx + '-' + l)

            _file_names[i * pfx_cnt + j] = name

            i += 1
            last_pfx = curr_pfx

        file_names = _file_names
        leg_names = _leg_names

        # remove duplicates in the x-axis labels
        xlabs = list(set(xlabs))

    if lnames != '':
        lnames_arr = lnames.split(';')
        if len(lnames_arr) != len(leg_names):
            abort(
                'Number of legend names must be equal to the number of source filters'
            )
        leg_names = lnames_arr

    # filter out unchanged variables in the x labels (need at least 2 labels)
    if omit_const_xlab_vars == '1' and len(xlabs) > 1:

        xlabs_arrs = {}
        xlabs_changed = {}

        for i in range(len(xlabs)):
            xlabs_arrs[i] = xlabs[i].split('\n')

        for i in range(len(xlabs_arrs[0])):
            changed = False
            xlab_var = xlabs_arrs[0][i]
            for j in range(1, len(xlabs)):
                if xlabs_arrs[j][i] != xlab_var:
                    changed = True
                    break

            xlabs_changed[i] = changed

        for i in range(len(xlabs)):
            tmp = []
            for j in range(len(xlabs_arrs[i])):
                if xlabs_changed[j]:
                    tmp.append(xlabs_arrs[i][j].replace('_', ' ', 1))

            xlabs[i] = '\n'.join(tmp)

    print(leg_names)
    print(file_names)

    #
    # pass the data files and auxiliary info to plot function
    #

    if out_name != '':
        oprefix = out_name + '_' + test_id_pfx + '_' + metric + '_' + ptype
    else:
        oprefix = test_id_pfx + '_' + metric + '_' + ptype
    title = oprefix

    plot_cmpexp(title, file_names, xlabs, ylab, yindex, yscaler, 'pdf',
                oprefix, pdf_dir, sep, aggr, diff, omit_const, ptype, ymin,
                ymax, leg_names, stime, etime, plot_params, plot_script)

    # done
    puts('\n[MAIN] COMPLETED analyse_cmpexp %s \n' % test_id_pfx)
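A hedged usage sketch (the flow tuple and legend name are illustrative): comparing median throughput across the completed experiments might be invoked as

fab analyse_cmpexp:source_filter=S_172.16.10.2_5000,metric=throughput,ptype=median,lnames=TCP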
Exemplo n.º 48
def check_mysql_connection(user, password, host, database, port):
    """
    Check that the MySQL connection works.
    """
    # MySQLdb is installed from the pip file so we don't want to
    # import it until now.
    import MySQLdb

    puts(green("Checking MySQL connection and packages"))

    try:
        dbase = MySQLdb.connect(host=host,
                                port=int(port),
                                user=user,
                                passwd=password,
                                db=database)

        cursor = dbase.cursor()
        cursor.execute("SELECT VERSION()")
        results = cursor.fetchone()
        # Check if anything at all is returned
        if results:
            puts(green("MySQL connection successful."))
    except Exception:
        puts(red("ERROR IN CONNECTION"))
        puts(red("Install cannot continue without valid database connection."))
        puts(red("Please verify your database credentials and try again."))
        exit()
    puts(green("You are connected to MySQL Server"))
    return True
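A usage sketch with illustrative credentials; note the task calls exit() on failure rather than raising, so a True return means the connection was verified:

check_mysql_connection('tour', 's3cret', 'localhost', 'tour_db', '3306')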
Exemplo n.º 49
def do(self):
    try:
        release = self.release(self.conf.release_name)
        puts('%s release: %s' % (self.conf.release_name, release))
    except ValueError:
        puts('Release %s does not exist' % self.conf.release_name)
Exemplo n.º 50
def analyse_2d_density(
        exp_list='experiments_completed.txt',
        res_dir='',
        out_dir='',
        source_filter='',
        min_values='3',
        xmetric='throughput',
        ymetric='tcprtt',
        variables='',
        out_name='',
        xmin='0',
        xmax='0',
        ymin='0',
        ymax='0',
        lnames='',
        group_by='aqm',
        replot_only='0',
        pdf_dir='',
        ts_correct='1',
        smoothed='1',
        link_len='0',
        plot_params='',
        plot_script='',
        xstat_index='',
        ystat_index='',
        dupacks='0',
        cum_ackseq='1',
        merge_data='0',
        #sburst='1', eburst='0', test_id_prefix='[0-9]{8}\-[0-9]{6}_experiment_',
        sburst='1',
        eburst='0',
        test_id_prefix='exp_[0-9]{8}\-[0-9]{6}_',
        slowest_only='0',
        query_host=''):
    "2d density / ellipse plot for different experiments"

    test_id_pfx = ''

    check = get_metric_params(xmetric, smoothed, ts_correct)
    if check is None:
        abort('Unknown metric %s specified with xmetric' % xmetric)
    check = get_metric_params(ymetric, smoothed, ts_correct)
    if check is None:
        abort('Unknown metric %s specified with ymetric' % ymetric)

    #if source_filter == '':
    #    abort('Must specify at least one source filter')

    if len(source_filter.split(';')) > 12:
        abort('Cannot have more than 12 filters')

    # XXX more param checking

    # make sure res_dir has valid form (out_dir is handled by extract methods)
    res_dir = valid_dir(res_dir)

    # Initialise source filter data structure
    sfil = SourceFilter(source_filter)

    # read test ids
    experiments = read_experiment_ids(exp_list)

    # get path based on first experiment id
    dir_name = get_first_experiment_path(experiments)

    # if we haven't got the extracted data, run the extract method(s) first
    if res_dir == '':
        for experiment in experiments:

            (ex_function,
             kwargs) = get_extract_function(xmetric,
                                            link_len,
                                            xstat_index,
                                            sburst=sburst,
                                            eburst=eburst,
                                            slowest_only=slowest_only,
                                            query_host=query_host)

            (dummy, out_files,
             out_groups) = ex_function(test_id=experiment,
                                       out_dir=out_dir,
                                       source_filter=source_filter,
                                       replot_only=replot_only,
                                       ts_correct=ts_correct,
                                       **kwargs)

            (ex_function,
             kwargs) = get_extract_function(ymetric,
                                            link_len,
                                            ystat_index,
                                            sburst=sburst,
                                            eburst=eburst,
                                            slowest_only=slowest_only,
                                            query_host=query_host)

            (dummy, out_files,
             out_groups) = ex_function(test_id=experiment,
                                       out_dir=out_dir,
                                       source_filter=source_filter,
                                       replot_only=replot_only,
                                       ts_correct=ts_correct,
                                       **kwargs)

        if out_dir == '' or out_dir[0] != '/':
            res_dir = dir_name + '/' + out_dir
        else:
            res_dir = out_dir

    else:
        if res_dir[0] != '/':
            res_dir = dir_name + '/' + res_dir

    # make sure we have trailing slash
    res_dir = valid_dir(res_dir)

    if pdf_dir == '':
        pdf_dir = res_dir
    else:
        if pdf_dir[0] != '/':
            pdf_dir = dir_name + '/' + pdf_dir
        pdf_dir = valid_dir(pdf_dir)
        # if pdf_dir specified create if it doesn't exist
        mkdir_p(pdf_dir)

    #
    # build match string from variables
    #

    (match_str, match_str2) = build_match_strings(experiments[0], variables,
                                                  test_id_prefix)

    #
    # filter out the experiments to plot, generate x-axis labels, get test id prefix
    #

    (fil_experiments, test_id_pfx,
     dummy) = filter_experiments(experiments, match_str, match_str2)

    #
    # get groups based on group_by variable
    #

    group_idx = 1
    levels = {}
    groups = []
    leg_names = []
    _experiments = []
    for experiment in fil_experiments:
        level = ''
        add_exp = True
        for g in group_by.split(';'):
            p = experiment.find(g)
            if p > -1:
                s = experiment.find('_', p)
                s += 1
                e = experiment.find('_', s)
                level += g + ':' + experiment[s:e] + ' '
            else:
                add_exp = False
                break

        # remove the final space from the string
        level = level[:-1]

        if add_exp:
            _experiments.append(experiment)
            #print('level: ' + level)

            if level not in levels:
                levels[level] = group_idx
                group_idx += 1
                leg_names.append(level)

            if merge_data == '1':
                groups.append(levels[level])
            else:
                for i in range(len(source_filter.split(';'))):
                    groups.append(levels[level])

    fil_experiments = _experiments

    #
    # get metric parameters and list of data files
    #

    # get the metric parameter for both x and y
    x_axis_params = get_metric_params(xmetric, smoothed, ts_correct,
                                      xstat_index, dupacks, cum_ackseq,
                                      slowest_only)
    y_axis_params = get_metric_params(ymetric, smoothed, ts_correct,
                                      ystat_index, dupacks, cum_ackseq,
                                      slowest_only)

    x_ext = x_axis_params[0]
    y_ext = y_axis_params[0]

    # if we merge responders make sure we only use the merged files
    if merge_data == '1':
        # reset source filter so we match the merged file
        sfil.clear()
        sfil = SourceFilter('S_0.0.0.0_0')

    x_files = []
    y_files = []
    for experiment in fil_experiments:
        _x_files = []
        _y_files = []
        _x_ext = x_ext
        _y_ext = y_ext

        _files = get_testid_file_list('', experiment, _x_ext, 'LC_ALL=C sort',
                                      res_dir)
        if merge_data == '1':
            _x_ext += '.all'
            _files = merge_data_files(_files)
        _x_files += _files

        _files = get_testid_file_list('', experiment, _y_ext, 'LC_ALL=C sort',
                                      res_dir)
        if merge_data == '1':
            _y_ext += '.all'
            _files = merge_data_files(_files)
        _y_files += _files

        match_str = '.*_([0-9\.]*_[0-9]*_[0-9\.]*_[0-9]*)[0-9a-z_.]*' + _x_ext
        for f in _x_files:
            #print(f)
            res = re.search(match_str, f)
            #print(res.group(1))
            if res and sfil.is_in(res.group(1)):
                # only add file if enough data points
                rows = int(
                    local('wc -l %s | awk \'{ print $1 }\'' % f, capture=True))
                if rows > int(min_values):
                    x_files.append(f)

        match_str = '.*_([0-9\.]*_[0-9]*_[0-9\.]*_[0-9]*)[0-9a-z_.]*' + _y_ext
        for f in _y_files:
            # print(f)
            res = re.search(match_str, f)
            if res and sfil.is_in(res.group(1)):
                # only add file if enough data points
                rows = int(
                    local('wc -l %s | awk \'{ print $1 }\'' % f, capture=True))
                if rows > int(min_values):
                    y_files.append(f)

    yindexes = [str(x_axis_params[2]), str(y_axis_params[2])]
    yscalers = [str(x_axis_params[3]), str(y_axis_params[3])]
    aggr_flags = [x_axis_params[5], y_axis_params[5]]
    diff_flags = [x_axis_params[6], y_axis_params[6]]

    if lnames != '':
        lnames_arr = lnames.split(';')
        if len(lnames_arr) != len(leg_names):
            abort(
                'Number of legend names must be equal to the number of source filters'
            )
        leg_names = lnames_arr

    print(x_files)
    print(y_files)
    print(groups)
    print(leg_names)

    #
    # pass the data files and auxiliary info to plot function
    #

    if out_name != '':
        oprefix = out_name + '_' + test_id_pfx + '_' + xmetric + '_' + ymetric
    else:
        oprefix = test_id_pfx + '_' + xmetric + '_' + ymetric
    title = oprefix

    plot_2d_density(title, x_files, y_files, x_axis_params[1],
                    y_axis_params[1], yindexes, yscalers, 'pdf', oprefix,
                    pdf_dir, x_axis_params[4], y_axis_params[4], aggr_flags,
                    diff_flags, xmin, xmax, ymin, ymax, groups, leg_names,
                    plot_params, plot_script)

    # done
    puts('\n[MAIN] COMPLETED analyse_2d_density %s \n' % test_id_pfx)
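A hedged usage sketch using the task's own default metrics (the grouping variable matches the default group_by='aqm'):

fab analyse_2d_density:exp_list=experiments_completed.txt,xmetric=throughput,ymetric=tcprtt,group_by=aqm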
Exemplo n.º 51
def rmdir(path, force=False):
    """Remove a directory and it's contents upon a user confirmation."""
    if force or confirm('Do you really want to remove {}'.format(path)):
        puts('removing {}'.format(path))
        sudo('rm -rf {}'.format(path))
Exemplo n.º 52
def update():
    start = time.time()
    run('comand')  # placeholder remote command
    final = time.time()
    total = final - start
    puts('Fim... %s' % total)
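The start/stop timing pattern above generalizes into a small context manager; a sketch, not part of the original fabfile (puts is assumed imported from fabric.api as elsewhere in this document):

import time
from contextlib import contextmanager

@contextmanager
def timed(label):
    # Measure wall-clock time of the enclosed block and report it.
    start = time.time()
    yield
    puts('%s took %.2f seconds' % (label, time.time() - start))

# with timed('update'):
#     run('comand')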
Exemplo n.º 53
def get_netint(int_no=0, windump='0', internal_int='1'):
    "Get network interface name"

    # need to convert if we run task from command line
    int_no = int(int_no)

    # check int_no parameter
    if int_no < 0:
        int_no = 0
    if int_no >= len(config.TPCONF_host_internal_ip[env.host_string]):
        int_no = len(config.TPCONF_host_internal_ip[env.host_string]) - 1

    # get type of current host
    htype = get_type_cached(env.host_string)

    if htype == 'FreeBSD' or htype == 'Linux' or htype == 'Darwin':
        # get IP and set last octet to 0
        if internal_int == '1':
            iip = config.TPCONF_host_internal_ip[env.host_string][int_no]
        else:
            iip = socket.gethostbyname(env.host_string.split(':')[0])

        a = iip.split('.')
        del a[3]
        iip = '.'.join(a)

        int_name = ''
        field_idx = -1
        lines = run('netstat -nr', shell=False)
        for line in lines.split('\n'):
            if line != '':
                fields = line.split()
                if len(fields) > 0 and fields[0] == 'Destination' and \
                        int_name == '':
                    for i in range(len(fields)):
                        if fields[i] == 'Netif':
                            field_idx = i
                if len(fields) > 0 and (fields[0].split('/')[0] == iip + '.0'
                                        or fields[0].split('/')[0] == iip):
                    int_name = fields[field_idx]

        #puts('Interface: %s' % int_name)
        return int_name

    elif htype == "CYGWIN":
        # on Windows there are two interface numberings:
        # the Windows numbering of interfaces and the
        # numbering used by windump

        if windump == '0':

            # get interface IPs and numbers
            output = run(
                'ipconfig | egrep "Local Area|IPv4" | grep -v "Tunnel"',
                pty=False)

            lines = output.split("\n")
            for i in range(0, len(lines), 2):
                int_num = lines[i].replace(":", "").split(" ")[-1]
                if int_num == "":  # XXX not sure what we are doing here
                    int_num = "1"
                int_ip = lines[i + 1].split(":")[1].strip()

                if internal_int == '1' and int_ip == config.TPCONF_host_internal_ip[
                        env.host_string][int_no] or \
                   internal_int == '0' and int_ip == socket.gethostbyname(
                        env.host_string.split(':')[0]):
                    puts('Interface: %s' % int_num)
                    return int_num

        else:
            # get list of interface numbers and interface IDs
            output = run(
                'winDUmp -D | sed "s/\([0-9]\)\.[^{]*{\([^}]*\).*/\\1 \\2/"',
                pty=False)

            # get list of interface macs and interface IDs
            output2 = run(
                'getmac | '
                'grep "^[0-9]" | sed "s/^\([0-9A-Fa-f-]*\)[^{]*{\([^}]*\).*/\\1 \\2/"',
                pty=False)

            # get mac of the internal/external interface
            mac = execute(get_netmac,
                          internal_int=internal_int,
                          hosts=[env.host_string]).values()[0]

            # find interface ID
            int_id = ''
            lines = output2.split("\n")
            for line in lines:
                _int_mac, _int_id = line.split(' ')

                # get mac print with '-' instead of ':'
                _int_mac = _int_mac.replace('-', ':').lower()
                if _int_mac == mac:
                    int_id = _int_id
                    break

            # get interface number (could use ID but it's a bit long)
            lines = output.split("\n")
            for line in lines:
                _int_num, _int_id = line.split(' ')
                if _int_id == int_id:
                    puts('Interface: %s' % _int_num)
                    return _int_num

    else:
        abort('Cannot determine network interface for OS %s' % htype)
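Because the task keys everything off env.host_string, it is normally dispatched per host with Fabric's execute; a sketch (the hostname is illustrative):

from fabric.api import execute
int_names = execute(get_netint, int_no=0, hosts=['testhost1'])
# execute returns a dict mapping each host string to its interface name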
Exemplo n.º 54
def msg(text):
    puts('\n\n------------------[ ' + text + ' ]------------------')
Exemplo n.º 55
deve_gerar_config = False
try:
    from config_empacotar import CAMINHO_EXT_DEV, \
        SIGLA_EMPRESA, NOME_EMPRESA
except Exception as e:
    CAMINHO_EXT_DEV = _abs('examples')
    SIGLA_EMPRESA = None
    NOME_EMPRESA = None
    deve_gerar_config = True

Dependency = namedtuple('Dependency', 'name link predicate post')

ENV_NAME = 'colibri'
WORKON = r'workon {}'.format(ENV_NAME)
WORKON_HOME = os.environ.get('WORKON_HOME')
if not WORKON_HOME:
    puts(
        'Por favor crie a variável de ambiente WORKON_HOME conforme documentação'
    )
    exit(1)
WORKON_HOME = os.path.join(WORKON_HOME, ENV_NAME)
PY_LAUNCHER = 'py -3.7'
with hide('output', 'running', 'warnings'):
    PYTHON = local(
        f'{PY_LAUNCHER} -c "import sys; import os; print(os.path.dirname(sys.executable))"',
        capture=True)
TCL_PATH = os.path.join(PYTHON, 'tcl')
INNO_SETUP_DOWNLOAD = r'https://s3.amazonaws.com/ncr-colibri/install/innosetup6-unicode.exe'
INNO_REG_PATH5 = u'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\Inno Setup 5_is1'
INNO_REG_PATH6 = u'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\Inno Setup 6_is1'
INNO_REG_KEY = u'InstallLocation'
COLIBRI_REG_PATH = u'HKEY_LOCAL_MACHINE\\Software\\NCR\\Brasil'
COLIBRI_REG_KEY = u'NCRSolution'
Exemplo n.º 56
def rmfiles(path, force=False):
    """Remove a directory contents upon a user confirmation."""
    if force or confirm(
            'Do you really want to purge {} contents?'.format(path)):
        puts('purging {} contents'.format(path))
        sudo('rm -rf {}'.format(mkpath(path).child('*')))
Exemplo n.º 57
def log(message, color=colors.yellow, output=sys.stdout):
    with utils.patch(sys, 'stdout', output):
        fab.puts(color(message))
Exemplo n.º 58
def instalar_innosetup():
    """
    Install the Unicode build of Inno Setup on this machine.
    """
    dest_file = _abs('isccsetup.exe')
    try:
        os.unlink(dest_file)
    except OSError:
        pass
    putsc('Innosetup')
    puts(' Baixando...')
    puts(' Ao instalar mantenha o "Install Inno Setup Preprocessor" marcado')
    _download_file(INNO_SETUP_DOWNLOAD, dest_file)
    puts(' Instalando...')
    ret = call(dest_file, shell=True)
    if ret == 0:
        puts(' Instalado com sucesso')
        puts(' Apagando o arquivo...')
        os.unlink(dest_file)
    else:
        puts(' Falhou com erro: {}'.format(ret))
    return ret
Exemplo n.º 59
def local_ls():
    puts("local ls")
    local('ls -lta')
Exemplo n.º 60
def check(boolean, message):
    if not boolean:
        puts(red(message))
        sys.exit(1)