Example 1
 def put_template(self, local, remote):
     """
     set template file
     """
     if not os.path.exists(local):
         error("File not found: %s" % local)
     put(local, remote)
Example 2
def archive_official(gc, image, dry_run, project):
    """Archive image by moving it to the <NECTAR_ARCHIVES> project.
    If the contributed flag is set,
    please specify the contributed archive project id.
    """
    name = image.name
    try:
        build = '[v%s]' % image.nectar_build
    except AttributeError:
        error("nectar_build not found for image.")

    # Add build number to name if it's not already there
    # E.g. NeCTAR Ubuntu 17.10 LTS (Artful) amd64 (v10)
    if build not in name:
        name = '%s %s' % (name, build)

    if dry_run:
        print("Running in dry run mode")
        print("Would archive image {} ({}) to project {} ({})"
              .format(name, image.id,
                      project.name, project.id))
        if 'murano_image_info' in image:
            print('Would remove murano image properties from {}'
                  .format(image.id))
    else:
        print("Archiving image {} ({}) to project {} ({})"
              .format(name, image.id, project.name, project.id))
        gc.images.update(image.id, name=name, owner=project.id,
                         visibility='community')

        if 'murano_image_info' in image:
            print('Removing murano image properties from {}'.format(image.id))
            gc.images.update(image.id, remove_props=['murano_image_info'])
Example 3
def deploy(tag=None):
    '''Pull latest master from origin, build new docker image, restart derby-examples services'''
    if tag is None:
        if 'deploy_tag' not in lever:
            error(
                "Must supply a tag to deploy. Use the tag task to build and select a tag, or 'deploy:<tag>'"
            )
        else:
            tag = lever['deploy_tag']
    # TODO Start redis/mongo services if not running
    if not exists('%s/%s' % (lever['repo_cache'], lever['repo_name'])):
        execute(clone)
    with cd('%s/%s' % (lever['repo_cache'], lever['repo_name'])):
        run('git fetch')
        run('git reset --hard %s' % tag)
        run('docker build -t derbyjs/derby-examples .')
    sleep(2)

    # Ensure dependent services running
    execute(start)

    # Restart app
    sudo('stop derbyjs/derby-examples', warn_only=True)
    sudo('stop derbyjs/component-examples', warn_only=True)
    sudo('start derbyjs/derby-examples')
    sudo('start derbyjs/component-examples')
Example 4
def gitrepos(branch=None, fork='sympy'):
    """
    Clone the repo

    fab vagrant prepare (namely, checkout_cache()) must be run first. By
    default, the branch checked out is the same one as the one checked out
    locally. The master branch is not allowed--use a release branch (see the
    README). No naming convention is put on the release branch.

    To test the release, create a branch in your fork, and set the fork
    option.
    """
    with cd("/home/vagrant"):
        if not exists("sympy-cache.git"):
            error("Run fab vagrant prepare first")
    if not branch:
        # Use the current branch (of this git repo, not the one in Vagrant)
        branch = local("git rev-parse --abbrev-ref HEAD", capture=True)
    if branch == "master":
        raise Exception("Cannot release from master")
    run("mkdir -p repos")
    with cd("/home/vagrant/repos"):
        run("git clone --reference ../sympy-cache.git https://github.com/{fork}/sympy.git"
            .format(fork=fork))
        with cd("/home/vagrant/repos/sympy"):
            run("git checkout -t origin/%s" % branch)
Example 5
def get_bundler_config():
    for prefix in config_file_locations:
        location = join(prefix, config_file_suffix)
        if fabric.contrib.files.exists(location, use_sudo=True):
            return location

    error('Could not find bundler config file.')
Example 6
def get_bundler_config():
    for prefix in config_file_locations:
        location = join(prefix, config_file_suffix)
        if fabric.contrib.files.exists(location, use_sudo=True):
            return location

    error('Could not find bundler config file.')
Example 7
def add_plugin(name, image=None, dockerfile='Dockerfile', build_context=None,
               should_restart=True):
    """
    Add a new plugin by modifying skygear server configuration

    If an image is specified, it will be treated as a Docker repository image
    and pulled from the repository. If an image is not specified, a build
    directory is configured where you should upload your plugin via git.

    Skygear Server is restarted automatically by default if an image is
    specified.

    If your Dockerfile is not at the project root, you should specify
    an alternative Dockerfile location and build context.
    """
    config_file = '/home/ubuntu/myapp/development.ini'
    service_name = "plugin_{0}".format(name)
    with cd("myapp"):
        data = read_compose_override()
        if service_name in data.get('services', {}):
            error("Plugin '{0}' already exists.".format(name))
            return
        augtool(r"""
        set /files{0}/plugin\ \"{1}\"/transport http
        set /files{0}/plugin\ \"{1}\"/path http://{1}:8000
        """.format(config_file, service_name))
        data = add_docker_plugin(data, name, image, dockerfile, build_context)
        write_compose_override(data)
    if image is None:
        puts("""Plugin '{0}' is added to Skygear. To upload plugin, add
'git@<ip address>:{0}' as a git remote and push your code.""".format(name))
        return
    if should_restart:
        restart(should_recreate=True)
Example 8
def update_sympy_org(website_location=None):
    """
    Update sympy.org

    This just means adding an entry to the news section.
    """
    website_location = website_location or get_location("sympy.github.com")

    # Check that the website directory is clean
    local("cd {website_location} && git diff --exit-code > /dev/null".format(website_location=website_location))
    local(
        "cd {website_location} && git diff --cached --exit-code > /dev/null".format(website_location=website_location)
    )

    release_date = time.gmtime(os.path.getctime(os.path.join("release", tarball_formatter()["source"])))
    release_year = str(release_date.tm_year)
    release_month = str(release_date.tm_mon)
    release_day = str(release_date.tm_mday)
    version = get_sympy_version()

    with open(os.path.join(website_location, "templates", "index.html"), "r") as f:
        lines = f.read().split("\n")
        # We could try to use some html parser, but this way is easier
        try:
            news = lines.index(r"    <h3>{% trans %}News{% endtrans %}</h3>")
        except ValueError:
            error("index.html format not as expected")
        lines.insert(
            news + 2,  # There is a <p> after the news line. Put it
            # after that.
            r"""        <span class="date">{{ datetime("""
            + release_year
            + """, """
            + release_month
            + """, """
            + release_day
            + """) }}</span> {% trans v='"""
            + version
            + """' %}Version {{ v }} released{% endtrans %} (<a href="https://github.com/sympy/sympy/wiki/Release-Notes-for-"""
            + version
            + """">{% trans %}changes{% endtrans %}</a>)<br/>
    </p><p>""",
        )

    with open(os.path.join(website_location, "templates", "index.html"), "w") as f:
        print("Updating index.html template")
        f.write("\n".join(lines))

    print("Generating website pages")
    local("cd {website_location} && ./generate".format(website_location=website_location))

    print("Committing")
    local(
        "cd {website_location} && git commit -a -m 'Add {version} to the news'".format(
            website_location=website_location, version=version
        )
    )

    print("Pushing")
    local("cd {website_location} && git push origin".format(website_location=website_location))
Example 9
def get_freshdesk_config(api_key=None,
                         email_config_id='6000071619',
                         group_id='6000208874',
                         domain='dhdnectar.freshdesk.com'):
    """fetch freshdesk API details from config file"""
    msg = '\n'.join([
        'No Freshdesk API key found in your Hivemind config file.',
        '',
        'You can find your Freshdesk API key by following the guide here:',
        'https://support.freshdesk.com/support/solutions/'
        'articles/215517-how-to-find-your-api-key',
        '',
        'Then add the following config to your Hivemind configuration',
        'file (~/.hivemind/hivemind/config.ini):',
        '',
        '  [cfg:hivemind_contrib.security.freshdesk]',
        '  api_key = <your api key>',
    ])

    if api_key is None:
        error(msg)

    config = {'api_key': api_key,
              'email_config_id': email_config_id,
              'group_id': group_id,
              'domain': domain}

    return config
Example 10
def curl_and_json(apis, endpoint, **kwargs):
    """Convenience call for curl + json parse"""

    if 'data' in kwargs:
        kwargs['data'] = json.dumps(kwargs['data'])
    tries = DEFAULT_HTTP_TRIES
    resp = None
    while tries > 0:
        try:
            resp = curl(apis, endpoint, **kwargs)
            break
        except Exception as e:
            tries -= 1
            time.sleep(DEFAULT_INTERVAL)
    if tries == 0:
        msg = 'Could not connect to {} after {} tries, giving up'.format(
            ','.join(apis), DEFAULT_HTTP_TRIES
        )
        error(msg)
        raise(Exception(msg))

    try:
        data = json.loads(resp)
    except Exception as e:
        error('Invalid JSON: {}'.format(resp), exception=e)
        raise

    return data
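A minimal usage sketch for the helper above, assuming it lives in a fabfile-style module; the API host, endpoint, and payload below are hypothetical:
# Hypothetical call; curl() and the DEFAULT_* constants come from the same module.
status = curl_and_json(['https://api.example.com'],
                       '/v1/health',
                       data={'check': 'deep'})
print(status)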
Example 11
def _delete_vms(vms, attempts=300, delay=10, ovirt=None):
    """
    Delete oVirt VMs while waiting for disks to be unlocked

    :param list vms:       The list of vms to delete
    :param int attempts:   How many attempts to make to delete a VM
    :param int delay:      How many seconds to wait between attempts to delete
    :param ovirtsdk.api.API ovirt: An open oVirt API connection
    """
    for attempt in xrange(0, attempts):
        remaining_vms = []
        for vm in vms:
            if ovirt.vms.get(id=vm.id).status.state != 'down':
                remaining_vms.append(vm)
                continue
            try:
                vm.delete(oVirtParams.Action(async=False))
            except oVirtErrors.RequestError as e:
                if (
                    e.status == 409 and e.reason == 'Conflict' and
                    e.message.find('disks are locked') >= 0
                ):
                    remaining_vms.append(vm)
                elif e.status == 404:
                    pass
                else:
                    raise
        if not remaining_vms:
            return
        sleep(delay)
        vms = remaining_vms
    error("Timed out trying to delete the following VMs: {0}".format(
        ', '.join(vm.name for vm in vms)
    ))
Example 12
def get_previous_version_tag():
    """
    Get the version of the previous release
    """
    # We try, probably too hard, to portably get the number of the previous
    # release of SymPy. Our strategy is to look at the git tags.  The
    # following assumptions are made about the git tags:

    # - The only tags are for releases
    # - The tags are given the consistent naming:
    #    sympy-major.minor.micro[.rcnumber]
    #    (e.g., sympy-0.7.2 or sympy-0.7.2.rc1)
    # In particular, it goes back in the tag history and finds the most recent
    # tag that doesn't contain the current short version number as a substring.
    shortversion = get_sympy_short_version()
    curcommit = "HEAD"
    with cd("/home/vagrant/repos/sympy"):
        while True:
            curtag = run("git describe --abbrev=0 --tags " + curcommit).strip()
            if shortversion in curtag:
                # If the tagged commit is a merge commit, we cannot be sure
                # that it will go back in the right direction. This almost
                # never happens, so just error
                parents = local("git rev-list --parents -n 1 " + curtag,
                                capture=True).strip().split()
                # rev-list prints the current commit and then all its parents
                assert len(parents) == 2, curtag
                curcommit = curtag + "^"  # The parent of the tagged commit
            else:
                print(blue("Using {tag} as the tag for the previous "
                           "release.".format(tag=curtag),
                           bold=True))
                return curtag
        error("Could not find the tag for the previous release.")
Example 13
def promote_official(gc, image, dry_run, project):
    """If the supplied image has nectar_name and nectar_build metadata, set
    to public. If there is an image with matching nectar_name and lower
    nectar_build, move that image to the <NECTAR_ARCHIVES> project.
    If the contributed flag is set, please specify the contributed project id.
    """
    if dry_run:
        print("Running in dry run mode")

    try:
        name = image.nectar_name
        build = (int(image.nectar_build))
    except AttributeError:
        error("nectar_name or nectar_build not found for image.")

    m_check = partial(match, name, build)
    matchingimages = filter(m_check,
                            gc.images.list(filters={'owner': image.owner}))

    for i in matchingimages:
        archive_official(gc, i, dry_run, project)

    if image.visibility == 'public':
        print("Image {} ({}) already set public"
              .format(image.name, image.id))
    else:
        if dry_run:
            print("Would set image {} ({}) to public"
                  .format(image.name, image.id))
        else:
            print("Setting image {} ({}) to public"
                  .format(image.name, image.id))
            gc.images.update(image.id, visibility='public')
Example 14
def stdout_result(cmd,
                  expected_errors=(),
                  shell=True,
                  sudo=False,
                  quiet=False):
    """
    Runs a command and returns the result that would be written to `stdout`, as a string. The output itself can
    be suppressed.

    :param cmd: Command to run.
    :type cmd: unicode
    :param expected_errors: If the return code is non-zero, but found in this tuple, it will be ignored. ``None`` is
      returned in this case.
    :type expected_errors: tuple
    :param shell: Use a shell.
    :type shell: bool
    :param sudo: Use `sudo`.
    :type sudo: bool
    :param quiet: If set to ``True``, does not show any output.
    :type quiet: bool
    :return: The result of the command as would be written to `stdout`.
    :rtype: unicode
    """
    which = operations.sudo if sudo else operations.run
    with hide('warnings'):
        result = which(cmd, shell=shell, quiet=quiet, warn_only=True)
    if result.return_code == 0:
        return result

    if result.return_code not in expected_errors:
        error("Received unexpected error code {0} while executing!".format(
            result.return_code))
    return None
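A short usage sketch, assuming `stdout_result` is defined in the active fabfile; the command and expected error code are illustrative:
def docker_version():
    # Exit code 127 ("command not found") is treated as expected and yields None
    # instead of triggering error().
    out = stdout_result('docker --version', expected_errors=(127,), quiet=True)
    return out if out is not None else 'docker not installed'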
Example 15
def gitrepos(branch=None, fork='sympy'):
    """
    Clone the repo

    fab vagrant prepare (namely, checkout_cache()) must be run first. By
    default, the branch checked out is the same one as the one checked out
    locally. The master branch is not allowed--use a release branch (see the
    README). No naming convention is put on the release branch.

    To test the release, create a branch in your fork, and set the fork
    option.
    """
    with cd("/home/vagrant"):
        if not exists("sympy-cache.git"):
            error("Run fab vagrant prepare first")
    if not branch:
        # Use the current branch (of this git repo, not the one in Vagrant)
        branch = local("git rev-parse --abbrev-ref HEAD", capture=True)
    if branch == "master":
        raise Exception("Cannot release from master")
    run("mkdir -p repos")
    with cd("/home/vagrant/repos"):
        run("git clone --reference ../sympy-cache.git https://github.com/{fork}/sympy.git".format(fork=fork))
        with cd("/home/vagrant/repos/sympy"):
            run("git checkout -t origin/%s" % branch)
Example 16
def environment():
    """Set the environment where the tasks will be executed"""

    name = os.environ.setdefault('PROJECT_ENV', 'dev')

    try:
        import project_cfg
    except ImportError:
        print('The project_cfg file is required but could not be imported.')
        sys.exit(1)

    if name not in project_cfg.environments:
        error(colors.red('Environment `{}` does not exist.'.format(name)))

    if hasattr(project_cfg, 'defaults'):
        env.update(project_cfg.defaults)

    env.update(project_cfg.environments[name])
    env.environment = name

    if env.get('is_vagrant'):
        env.superuser = '******'
        env.ssh_config_path = '.ssh_config'
        env.use_ssh_config = True
        env.disable_known_hosts = True
        local('vagrant ssh-config > .ssh_config')
    else:
        env.is_vagrant = False
Example 17
 def __exit__(self, type, value, tb):
     if tb is None:
         puts(green('Finishing transaction.'))
     else:
         error('Transaction failed.', func=warn)
         error('Rolling back.', func=warn)
         self.rollback()
Example 18
def cluster(cmd=None):
    """
    The cluster mini wrapper; values for the cmd arg: boot, start, stop, destroy
      * fab cluster:boot : run 'docker.io run' on each container (only needs to be run once)
      * fab cluster:stop : stop all containers in the cluster
      * fab cluster:start : run 'docker.io start' on each container
      * fab cluster:destroy : destroy the whole cluster and clean docker's data (see dkclean)
    """

    generic_msg  = "\nAvailable commands are :\n  * start\n  * stop\n  * destroy\n  * boot"

    if cmd == "start":
        cluster_start()
        puts(green("cluster start [OK]"))
    elif cmd == "stop":
        cluster_stop()
        puts(green("cluster stop [OK]"))
    elif cmd == "boot":
        cluster_boot()
        puts(green("cluster boot [OK]"))
    elif cmd == "destroy":
        if confirm("Are you sure to destroy ? Everything you need is backuped ?",default=False):
            cluster_destroy()
            puts(green("cluster destroy [OK]"))
        else:
            abort(yellow("cluster destroy [CANCELED]"))
    elif cmd is None:
        puts(generic_msg)
    else:
        error(red("command %s not found!" % cmd))
        puts(generic_msg)
Example 19
def wait_until_role_is_up(*,
                          role: str,
                          task: Callable,
                          poll_interval: int = 3,
                          max_wait: int = 20) -> bool:
    waiting_seconds = 0
    stderr = '-'

    puts(f'waiting for {role} to be up for as long as {max_wait} seconds')
    while waiting_seconds < max_wait:
        # skip waiting on the first iteration, uwsgi may already be up
        up_hosts, stderr = check_role_is_up(role, task)
        if all(up_hosts.values()):
            puts(f'role {role} is up after {waiting_seconds} seconds')
            return True
        else:
            failed_hosts = ', '.join(
                [host for host, status in up_hosts.items() if not status])
            puts(
                f'role {role} is not up after {waiting_seconds} seconds: failed hosts: {failed_hosts}'
            )

        sleep(poll_interval)
        waiting_seconds += poll_interval

    with settings(warn_only=False):
        error(
            f'waited for {waiting_seconds} seconds, role {role} is not up. Aborting \n {stderr}'
        )

    return False
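A usage sketch, assuming `wait_until_role_is_up` and a health-check helper live in the same fabfile; the role name and the `ping_uwsgi` task are hypothetical:
from fabric.api import run, task

def ping_uwsgi():
    # Placeholder health check; a real fabfile would probe the service properly.
    return run('systemctl is-active uwsgi', warn_only=True)

@task
def deploy_web():
    if not wait_until_role_is_up(role='web', task=ping_uwsgi,
                                 poll_interval=5, max_wait=60):
        return  # error() inside the helper has already reported the failed hosts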
Example 20
def curl_and_json(apis, endpoint, **kwargs):
    """Convenience call for curl + json parse"""

    if 'data' in kwargs:
        kwargs['data'] = json.dumps(kwargs['data'])
    tries = DEFAULT_HTTP_TRIES
    resp = None
    while tries > 0:
        try:
            resp = curl(apis, endpoint, **kwargs)
            break
        except Exception as e:
            tries -= 1
            time.sleep(DEFAULT_INTERVAL)
    if tries == 0:
        msg = 'Could not connect to {} after {} tries, giving up'.format(
            ','.join(apis), DEFAULT_HTTP_TRIES)
        error(msg)
        raise (Exception(msg))

    try:
        data = json.loads(resp)
    except Exception as e:
        error('Invalid JSON: {}'.format(resp), exception=e)
        raise

    return data
Example 21
    def __init__(self, callable, *args, **kwargs):
        super(CustomTask, self).__init__(callable, *args, **kwargs)
        if env.ssh_config_path and os.path.isfile(os.path.expanduser(env.ssh_config_path)):
            env.use_ssh_config = True
        if env.host_string == 'localhost' or not env.hosts:
            env.pyexecutable = sys_executable
            env.cd = partial(custom_cd, lcd, 002)
            env.run = local
            conffile = 'devenv.json'
        else:
            env.cd = partial(custom_cd, cd, 002)
            env.run = run
            if 'production' in env and env.production:
                error('TBD')
            else:
                conffile = 'testenv.json'

        if 'conffile' in env:
            conffile = env.conffile

        with open(conffile) as f:
            d = json_load(f)

        env.update(d)

        env.activate = ''.join(['. ', env.venvpath, '/bin/activate'])
Example 22
def script(container, script_path, fail_nonzero=False, upload_dir=False, **kwargs):
    """
    Runs a script inside a container, which is created with all its dependencies. The container is removed after it
    has been run, whereas the dependencies are not destroyed. The output is printed to the console.

    :param container: Container configuration name.
    :param script_path: Local path to the script file.
    :param fail_nonzero: Fail if the script returns with a nonzero exit code.
    :param upload_dir: Upload the entire parent directory of the script file to the remote.
    :param kwargs: Additional keyword arguments to the run_script action.
    """
    full_script_path = os.path.abspath(script_path)
    prefix, name = os.path.split(full_script_path)
    with temp_dir() as remote_tmp:
        if upload_dir:
            prefix_path, prefix_name = os.path.split(prefix)
            remote_script = posixpath.join(remote_tmp, prefix_name, name)
            put(prefix, remote_tmp, mirror_local_mode=True)
        else:
            remote_script = posixpath.join(remote_tmp, name)
            put(script_path, remote_script, mirror_local_mode=True)
        results = [output.result
                   for output in container_fabric().run_script(container, script_path=remote_script, **kwargs)
                   if output.action_type == ContainerUtilAction.SCRIPT]
    for res in results:
        puts("Exit code: {0}".format(res['exit_code']))
        if res['exit_code'] == 0 or not fail_nonzero:
            puts(res['log'])
        else:
            error(res['log'])
Example 23
def is_schema_misconfigured(schema={}):
    for i in ['ENGINE', 'NAME', 'USER', 'PASSWORD', 'HOST', 'PORT']:
        if i not in schema or schema[i] is None:
            error('schema[{}] misconfigured'.format(i))
            return True

    return False
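An illustrative check against a Django-style settings dict; the values below are placeholders, not credentials from the original source:
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'appdb',
        'USER': 'app',
        'PASSWORD': 'changeme',
        'HOST': 'localhost',
        'PORT': '5432',
    }
}

if is_schema_misconfigured(DATABASES['default']):
    raise SystemExit(1)  # error() above has already named the missing key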
Example 24
def archive(image_id, dry_run=True, tenant=None, community=False):
    """Archive image by moving it to the <NECTAR_ARCHIVES> tenant.
    If the community flag is set,
    please specify the community archive tenant id.
    """
    if dry_run:
        print("Running in dry run mode")

    if community:
        archive_tenant = get_community_archive_tenant(tenant)
    else:
        archive_tenant = get_archive_tenant(tenant)

    gc = get_glance_client(keystone.client())
    try:
        image = gc.images.get(image_id)
    except exc.HTTPNotFound:
        error("Image ID not found.")

    if dry_run:
        print("Would archive image {} ({}) to tenant {} ({})"
              .format(image.name, image.id,
                      archive_tenant.name, archive_tenant.id))
        if 'murano_image_info' in image.properties:
            print('Would remove murano image properties from {}'
                  .format(image.id))
    else:
        print("Archiving image {} ({}) to tenant {} ({})"
              .format(image.name, image.id,
                      archive_tenant.name, archive_tenant.id))
        change_tenant(image, archive_tenant)
        if 'murano_image_info' in image.properties:
            print('Removing murano image properties from {}'.format(image.id))
            remove_property(image, 'murano_image_info')
Example 25
def get_previous_version_tag():
    """
    Get the version of the previous release
    """
    # We try, probably too hard, to portably get the number of the previous
    # release of SymPy. Our strategy is to look at the git tags.  The
    # following assumptions are made about the git tags:

    # - The only tags are for releases
    # - The tags are given the consistent naming:
    #    sympy-major.minor.micro[.rcnumber]
    #    (e.g., sympy-0.7.2 or sympy-0.7.2.rc1)
    # In particular, it goes back in the tag history and finds the most recent
    # tag that doesn't contain the current short version number as a substring.
    shortversion = get_sympy_short_version()
    curcommit = "HEAD"
    with cd("/home/vagrant/repos/sympy"):
        while True:
            curtag = run("git describe --abbrev=0 --tags " +
                curcommit).strip()
            if shortversion in curtag:
                # If the tagged commit is a merge commit, we cannot be sure
                # that it will go back in the right direction. This almost
                # never happens, so just error
                parents = local("git rev-list --parents -n 1 " + curtag,
                    capture=True).strip().split()
                # rev-list prints the current commit and then all its parents
                assert len(parents) == 2, curtag
                curcommit = curtag + "^" # The parent of the tagged commit
            else:
                print(blue("Using {tag} as the tag for the previous "
                    "release.".format(tag=curtag), bold=True))
                return curtag
        error("Could not find the tag for the previous release.")
Example 26
def is_schema_misconfigured(schema={}):
    for i in ['ENGINE', 'NAME', 'USER', 'PASSWORD', 'HOST', 'PORT']:
        if i not in schema or schema[i] is None:
            error('schema[{}] misconfigured'.format(i))
            return True

    return False
Example 27
def RollOverControlAppServers(service="stop"):
    ''' Stop/Start/Restart/State weblogic admin servers '''
    print(service)
    weblogic_bin_path = os.path.join(g_weblogic_dir, 'bin')
    with cd(weblogic_bin_path):
        if service.lower()=="stop":
            sudo('./weblogic.admin.managed stop',user=wworks_User)
            sudo('./weblogic.platform.managed stop',user=wworks_User)
            print('[%s]' % (blue("PASS")))
        elif service.lower()=="start":
            sudo('nohup ./weblogic.admin.managed start | tee ../logs/nohup.out',user=wworks_User)
            sudo('nohup ./weblogic.platform.managed start | tee ../logs/nohup.out',user=wworks_User)
            print('[%s]' % (blue("PASS")))
        elif service.lower()=="restart":
            sudo('./weblogic.admin.managed stop',user=wworks_User)
            sudo('./weblogic.platform.managed stop',user=wworks_User)
            sudo('nohup ./weblogic.admin.managed start | tee ../logs/nohup.out',user=wworks_User)
            sudo('nohup ./weblogic.platform.managed start | tee ../logs/nohup.out',user=wworks_User)
            print('[%s]' % (blue("PASS")))
        elif service.lower()=="state":
            sudo('./weblogic.admin.managed state',user=wworks_User)
            sudo('./weblogic.platform.managed state',user=wworks_User)
            print('[%s]' % (blue("PASS")))
        else:
            error('Invalid service type')
Example 28
def get_or_create_group(groupname,
                        gid_preset,
                        system=False,
                        id_dependent=True):
    """
    Returns the id for the given group, and creates it first in case it does not exist.

    :param groupname: Group name.
    :type groupname: unicode
    :param gid_preset: Group id to set if a new group is created.
    :type gid_preset: int or unicode
    :param system: Create a system group.
    :type system: bool
    :param id_dependent: If the group exists, but its id does not match `gid_preset`, an error is thrown.
    :type id_dependent: bool
    :return: Group id of the existing or new group.
    :rtype: int
    """
    gid = get_group_id(groupname)
    if gid is None:
        create_group(groupname, gid_preset, system)
        return gid_preset
    elif id_dependent and gid != gid_preset:
        error(
            "Present group id '{0}' does not match the required id of the environment '{1}'."
            .format(gid, gid_preset))
    return gid
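A short usage sketch; the group name and gid are hypothetical and assume the helper above is available in the current fabfile:
# Ensure a system group exists with a fixed gid before deploying files owned by it.
gid = get_or_create_group('appdata', 2001, system=True)
print("Group 'appdata' resolved to gid {0}".format(gid))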
Example 29
def generate_instance_info(instance_id, style=None):
    nc = nova.client()
    kc = keystone.client()
    gc = glance.get_glance_client(kc)

    try:
        instance = nc.servers.get(instance_id)
    except n_exc.NotFound:
        error("Instance {} not found".format(instance_id))

    info = instance._info.copy()
    for network_label, address_list in instance.networks.items():
        info["%s network" % network_label] = ", ".join(address_list)

    flavor = info.get("flavor", {})
    flavor_id = flavor.get("id", "")

    try:
        info["flavor"] = "%s (%s)" % (nova.get_flavor(nc, flavor_id).name, flavor_id)
    except Exception:
        info["flavor"] = "%s (%s)" % ("Flavor not found", flavor_id)

    # Image
    image = info.get("image", {})
    if image:
        image_id = image.get("id", "")
        try:
            img = gc.images.get(image_id)
            nectar_build = img.properties.get("nectar_build", "N/A")
            info["image"] = "%s (%s, NeCTAR Build %s)" % (img.name, img.id, nectar_build)
        except Exception:
            info["image"] = "Image not found (%s)" % image_id
    else:  # Booted from volume
        info["image"] = "Attempt to boot from volume - no image supplied"

    # Tenant
    tenant_id = info.get("tenant_id")
    if tenant_id:
        try:
            tenant = keystone.get_tenant(kc, tenant_id)
            info["tenant_id"] = "%s (%s)" % (tenant.name, tenant.id)
        except Exception:
            pass

    # User
    user_id = info.get("user_id")
    if user_id:
        try:
            user = keystone.get_user(kc, user_id)
            info["user_id"] = "%s (%s)" % (user.name, user.id)
        except Exception:
            pass

    # Remove stuff
    info.pop("links", None)
    info.pop("addresses", None)
    info.pop("hostId", None)
    info.pop("security_groups", None)

    return _format_instance(info, style=style)
Example 30
def readCfg():
    global conf
    global isConfReaded
    global configFile
    global BASE_LOCAL_DIR, MASTER_HOST
    if configFile is None:
        error('Configfile of the deployment is missing! Use task "cfg"!')
    if not isConfReaded:
        conf = readConfig(configFile)
        isConfReaded = True

    if conf.has_option('ALGO', 'USER'):
        env.user = conf.get('ALGO', 'USER')
    else:
        env.user = '******'

    if conf.has_option('ALGO', 'KEY_FILENAME'):
        env.key_filename = conf.get('ALGO', 'KEY_FILENAME')

    if conf.has_option('NODE', 'REMOTE_DIR'):
        slaveryCfg = conf.get('NODE', 'REMOTE_DIR') + '/slavery.cfg'
        conf.set('NODE', 'SLAVERY_CFG', slaveryCfg)

    BASE_DIR = conf.get('ALGO', 'BASE_DIR')
    LOCAL_DIR = BASE_DIR + "/outdir/"

    conf.set('ALGO', 'LOCAL_DIR', LOCAL_DIR)

    BASE_LOCAL_DIR = LOCAL_DIR
    MASTER_HOST = conf.get('ALGO', 'MASTER_HOST')

    conf.set("ALGO", "MASTER_LOG", BASE_LOCAL_DIR + "master.log")

    buildHosts(conf)
    return conf
Example 31
def changetenant(image, tenant=None):
    """move image to new_tenant"""
    msg = " ".join(("No archive tenant set.", "Please set tenant in",
                    "[cfg:hivemind_contrib.glance.archivetenant]"))
    if tenant is None:
        error(msg)
    image.update(owner=tenant)
Example 32
def promote(image_id, dry_run=True, tenant=None, community=False):
    """If the supplied image has nectar_name and nectar_build metadata, set
    to public. If there is an image with matching nectar_name and lower
    nectar_build, move that image to the <NECTAR_ARCHIVES> tenant.
    If the community flag is set, please specify the community tenant id.
    """
    if dry_run:
        print("Running in dry run mode")

    if community:
        archive_tenant = get_community_tenant(tenant)
    else:
        archive_tenant = get_archive_tenant(tenant)

    images = get_glance_client(keystone.client()).images
    try:
        image = images.get(image_id)
    except exc.HTTPNotFound:
        error("Image ID not found.")
    if not community:
        try:
            name = image.properties['nectar_name']
            build = (int(image.properties['nectar_build']))
        except KeyError:
            error("nectar_name or nectar_build not found for image.")

        m_check = partial(match, name, build)
        matchingimages = filter(m_check, images.findall(owner=image.owner))
    else:
        matchingimages = [image]

    for i in matchingimages:
        if dry_run:
            print("Would change ownership of image {} ({}) to tenant {} ({})"
                  .format(i.name, i.id,
                          archive_tenant.name, archive_tenant.id))
            if 'murano_image_info' in i.properties:
                print('Would remove murano image properties from {}'
                      .format(i.id))
        else:
            change_tenant(i, archive_tenant)
            print("Changing ownership of image {} ({}) to tenant {} ({})"
                  .format(i.name, i.id,
                          archive_tenant.name, archive_tenant.id))
            if 'murano_image_info' in i.properties:
                print('Removing murano image properties from {}'
                      .format(i.id))
                remove_property(i, 'murano_image_info')

    if image.is_public:
        print("Image {} ({}) already set public"
              .format(image.name, image.id))
    else:
        if dry_run:
            print("Would set image {} ({}) to public"
                  .format(image.name, image.id))
        else:
            print("Setting image {} ({}) to public"
                  .format(image.name, image.id))
            image.update(is_public=True)
Example 33
def stdout_result(cmd, expected_errors=(), shell=True, sudo=False, quiet=False):
    """
    Runs a command and returns the result that would be written to `stdout`, as a string. The output itself can
    be suppressed.

    :param cmd: Command to run.
    :type cmd: unicode
    :param expected_errors: If the return code is non-zero, but found in this tuple, it will be ignored. ``None`` is
      returned in this case.
    :type expected_errors: tuple
    :param shell: Use a shell.
    :type shell: bool
    :param sudo: Use `sudo`.
    :type sudo: bool
    :param quiet: If set to ``True``, does not show any output.
    :type quiet: bool
    :return: The result of the command as would be written to `stdout`.
    :rtype: unicode
    """
    which = operations.sudo if sudo else operations.run
    with hide('warnings'):
        result = which(cmd, shell=shell, quiet=quiet, warn_only=True)
    if result.return_code == 0:
        return result

    if result.return_code not in expected_errors:
        error("Received unexpected error code {0} while executing!".format(result.return_code))
    return None
Example 34
def add_plugin(name, image=None, dockerfile='Dockerfile', build_context=None,
               should_restart=True):
    """
    Add a new plugin by modifying skygear server configuration

    If an image is specified, it will be treated as a Docker repository image
    and pulled from the repository. If an image is not specified, a build
    directory is configured where you should upload your plugin via git.

    Skygear Server is restarted automatically by default if an image is
    specified.

    If your Dockerfile is not at the project root, you should specify
    an alternative Dockerfile location and build context.
    """
    config_file = '/home/ubuntu/myapp/development.ini'
    service_name = "plugin_{0}".format(name)
    with cd("myapp"):
        data = read_compose_override()
        if service_name in data.get('services', {}):
            error("Plugin '{0}' already exists.".format(name))
            return
        augtool(r"""
        set /files{0}/plugin\ \"{1}\"/transport http
        set /files{0}/plugin\ \"{1}\"/path http://{1}:8000
        """.format(config_file, service_name))
        data = add_docker_plugin(data, name, image, dockerfile, build_context)
        write_compose_override(data)
    if image is None:
        puts("""Plugin '{0}' is added to Skygear. To upload plugin, add
'git@<ip address>:{0}' as a git remote and push your code.""".format(name))
        return
    if should_restart:
        restart(should_recreate=True)
Example 35
 def _lookup(self, key):
     result = self.config_properties.get(key, self.default_config[key])
     if not result:
         error("Key %s is not configured in coordinator configuration"
               "%s on host %s and has no default" %
               (key, self.config_host, self.config_path))
     return result
Example 36
def uptodate(proj=None):
    """If the project is up to date"""
    if proj is None:
        proj = project_dir
    with cd(proj):
        with hide('running', 'stdout'):
            run('git remote update')
            local = run('git rev-parse @')
            remote = run('git rev-parse @{u}')
            base = run('git merge-base @ @{u}')
        if local == remote:
            return True
        elif local == base:
            utils.warn('local: {} remote: {} base: {}'.format(
                local, remote, base))
            return False
        elif remote == base:
            utils.warn('local: {} remote: {} base: {}'.format(
                local, remote, base))
            utils.error('Push project first!!!')
        else:
            utils.warn('local: {} remote: {} base: {}'.format(
                local, remote, base))
            utils.error('local diverged!!!')
        return False
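A sketch of guarding a deploy task with the check above; the project path and task body are hypothetical:
from fabric.api import task

@task
def deploy():
    if not uptodate('/srv/myproject'):
        return  # uptodate() has already warned or errored with the details
    # ...continue with the actual deployment steps here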
Example 37
def get_creds(username=None, password=None):
    msg = " ".join(("No ospurge credentials.", "Please set username",
                    "and password for the ospurge user in",
                    "[cfg:hivemind_contrib.ospurge.creds]"))
    if username is None or password is None:
        error(msg)
    return username, password
Example 38
def nice_local(command, nice=0, capture=False, shell=None):
    """
    Exactly like Fabric's local but with an optional nice argument
    """

    from fabric.operations import _prefix_env_vars, _prefix_commands, _AttributeString
    from fabric.state import output, win32, env
    from fabric.utils import error

    given_command = command
    # Apply cd(), path() etc
    with_env = _prefix_env_vars(command, local=True)
    wrapped_command = _prefix_commands(with_env, 'local')

    if output.debug:
        print("[localhost] local: %s" % (wrapped_command))
    elif output.running:
        print("[localhost] local: " + given_command)

    # Tie in to global output controls as best we can; our capture argument
    # takes precedence over the output settings.
    dev_null = None
    if capture:
        out_stream = subprocess.PIPE
        err_stream = subprocess.PIPE
    else:
        dev_null = open(os.devnull, 'w+')
        # Non-captured, hidden streams are discarded.
        out_stream = None if output.stdout else dev_null
        err_stream = None if output.stderr else dev_null
    try:
        cmd_arg = wrapped_command if win32 else [wrapped_command]
        p = subprocess.Popen(cmd_arg,
                             shell=True,
                             stdout=out_stream,
                             stderr=err_stream,
                             executable=shell,
                             preexec_fn=lambda: os.nice(nice),
                             close_fds=(not win32))
        (stdout, stderr) = p.communicate()
    finally:
        if dev_null is not None:
            dev_null.close()
    # Handle error condition (deal with stdout being None, too)
    out = _AttributeString(stdout.strip() if stdout else "")
    err = _AttributeString(stderr.strip() if stderr else "")
    out.command = given_command
    out.real_command = wrapped_command
    out.failed = False
    out.return_code = p.returncode
    out.stderr = err
    if p.returncode not in env.ok_ret_codes:
        out.failed = True
        msg = "local() encountered an error (return code %s) while executing '%s'" % (
            p.returncode, command)
        error(message=msg, stdout=out, stderr=err)
    out.succeeded = not out.failed
    # If we were capturing, this will be a string; otherwise it will be None.
    return out
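A usage sketch; the command and niceness value are illustrative:
# Run a CPU-heavy local step at reduced scheduling priority and capture its output.
result = nice_local('tar czf release.tar.gz build/', nice=10, capture=True)
if result.failed:
    print(result.stderr)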
Example 39
def _run_command(command, shell=True, pty=True, combine_stderr=True,
    sudo=False, user=None, quiet=False, stdout=None, stderr=None):
    """
    Underpinnings of `run` and `sudo`. See their docstrings for more info.
    """
    with quiet_manager() if quiet else _noop():
        # Set up new var so original argument can be displayed verbatim later.
        given_command = command
        # Handle context manager modifications, and shell wrapping
        wrapped_command = _shell_wrap(
            _prefix_commands(_prefix_env_vars(command), 'remote'),
            shell,
            _sudo_prefix(user) if sudo else None
        )
        # Execute info line
        which = 'sudo' if sudo else 'run'
        if output.debug:
            print("[%s] %s: %s" % (env.host_string, which, wrapped_command))
        elif output.running or env.dry_run_remote:
            print("[%s] %s: %s" % (env.host_string, which, given_command))

        if env.dry_run_remote:
            # Fake execution, assume command completed ok and returned 0
            result_stdout, result_stderr, status = ("", "", 0)
        else:
            # Actual execution, stdin/stdout/stderr handling, and termination
            result_stdout, result_stderr, status = _execute(default_channel(), wrapped_command,
                pty, combine_stderr, stdout, stderr)

        # Assemble output string
        out = _AttributeString(result_stdout)
        err = _AttributeString(result_stderr)

        # Error handling
        out.failed = False
        if status != 0:
            out.failed = True
            msg = "%s() received nonzero return code %s while executing" % (
                which, status
            )
            if env.warn_only:
                msg += " '%s'!" % given_command
            else:
                msg += "!\n\nRequested: %s\nExecuted: %s" % (
                    given_command, wrapped_command
                )
            error(message=msg, stdout=out, stderr=err)

        # Attach return code to output string so users who have set things to
        # warn only, can inspect the error code.
        out.return_code = status

        # Convenience mirror of .failed
        out.succeeded = not out.failed

        # Attach stderr for anyone interested in that.
        out.stderr = err

        return out
Example 40
def valid_distro(distro, raise_error=True):
    if distro not in SUPPORTED_DISTROS:
        if raise_error:
            error('"{}" is an invalid distro.  Choose {}'.format(
                distro, SUPPORTED_DISTROS))
        return False

    return True
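An illustrative call; SUPPORTED_DISTROS is defined by the surrounding module, and the values shown here are placeholders:
SUPPORTED_DISTROS = ('ubuntu', 'centos')

if valid_distro('debian', raise_error=False):
    print('proceeding with debian host')
else:
    print('unsupported distro, skipping host')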
Example 41
def service(command=None):
    """ usage:  service:command
    ex:     fab -R production service:status
    commands: start, stop, status
    """
    if command:
        run('service fbone %s' % command)
    else:
        utils.error('invalid command')
Example 42
def run_tests():
    run_tests = prompt(colored("Run Tests? [y,n]", 'yellow'), default="y")
    if run_tests.lower() in env.truthy:

        result = local('python manage.py test')

        if result not in ['', 1, True]:
            error(colored('You may not proceed as the tests are not passing', 'orange'))
Example 43
def ps(name=None):
    """ usage:  ps:name
    ex:     fab -R production ps:name
    name: process name
    """
    if name:
        run('ps aux | grep %s' % name)
    else:
        utils.error('invalid command')
Example 44
def service(command=None):
    """ usage:  service:command
    ex:     fab -R production service:status
    commands: start, stop, status
    """
    if command:
        run('service fbone %s' % command)
    else:
        utils.error('invalid command')
Example 45
def _get_address(interface_name, pattern):
    out = stdout_result('ifconfig {0}'.format(interface_name), (1,), shell=False, quiet=True)
    if not out:
        error("Network interface {0} not found.".format(interface_name))
    match = pattern.search(out)
    if match:
        return match.group(1)
    return None
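A usage sketch; the interface name and regex are illustrative and assume GNU/Linux `ifconfig` output:
import re

# Matches both "inet addr:10.0.0.5" (net-tools) and "inet 10.0.0.5" (newer) forms.
IP4_PATTERN = re.compile(r'inet (?:addr:)?(\d+\.\d+\.\d+\.\d+)')

address = _get_address('eth0', IP4_PATTERN)
print(address or 'interface has no IPv4 address')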
Example 46
def ps(name=None):
    """ usage:  ps:name
    ex:     fab -R production ps:name
    name: process name
    """
    if name:
        run('ps aux | grep %s' % name)
    else:
        utils.error('invalid command')
Example 47
 def _lookup_node_config(self, key):
     result = self.node_config_properties.get(key, self.default_node_config[key])
     if not result:
         error(
             "Key %s is not configured in coordinator node configuration"
             "%s on host %s and has no default" %
             (key, self.config_host, os.path.join(REMOTE_CONF_DIR, NODE_PROPERTIES)))
     return result
Example 48
def bootstrap():
    '''Places upstart and varnish configs, reload varnish'''
    if not exists('/etc/varnish'):
        error('Varnish is not installed. Has this host been chef bootstrapped with the "derbyjs-com" role?')
    put('varnish/*', remote_path='/etc/varnish/', use_sudo=True)
    sudo('mkdir -p /etc/init/derbyjs')
    put('init/*', remote_path='/etc/init/derbyjs/', use_sudo=True)
    sudo('service varnish reload')
Example 49
 def read(self, path, required=False):
     fn = self.loader.decrypt if path.endswith(".gpg") else self.loader.static
     if not self.loader.exists(path):
         if required:
             utils.error("Configuration file '%s' not found" % path)
             raise RuntimeError
         return
     self.readfp(StringIO.StringIO(fn(path)))
Example 50
def update_sympy_org(website_location=None):
    """
    Update sympy.org

    This just means adding an entry to the news section.
    """
    website_location = website_location or get_location("sympy.github.com")

    # Check that the website directory is clean
    local("cd {website_location} && git diff --exit-code > /dev/null".format(
        website_location=website_location))
    local("cd {website_location} && git diff --cached --exit-code > /dev/null".
          format(website_location=website_location))

    release_date = time.gmtime(
        os.path.getctime(os.path.join("release",
                                      tarball_formatter()["source"])))
    release_year = str(release_date.tm_year)
    release_month = str(release_date.tm_mon)
    release_day = str(release_date.tm_mday)
    version = get_sympy_version()

    with open(os.path.join(website_location, "templates", "index.html"),
              "r") as f:
        lines = f.read().split("\n")
        # We could try to use some html parser, but this way is easier
        try:
            news = lines.index(r"    <h3>{% trans %}News{% endtrans %}</h3>")
        except ValueError:
            error("index.html format not as expected")
        lines.insert(
            news + 2,  # There is a <p> after the news line. Put it
            # after that.
            r"""        <span class="date">{{ datetime(""" + release_year +
            """, """ + release_month + """, """ + release_day +
            """) }}</span> {% trans v='""" + version +
            """' %}Version {{ v }} released{% endtrans %} (<a href="https://github.com/sympy/sympy/wiki/Release-Notes-for-"""
            + version + """">{% trans %}changes{% endtrans %}</a>)<br/>
    </p><p>""",
        )

    with open(os.path.join(website_location, "templates", "index.html"),
              "w") as f:
        print("Updating index.html template")
        f.write("\n".join(lines))

    print("Generating website pages")
    local("cd {website_location} && ./generate".format(
        website_location=website_location))

    print("Committing")
    local(
        "cd {website_location} && git commit -a -m 'Add {version} to the news'"
        .format(website_location=website_location, version=version))

    print("Pushing")
    local("cd {website_location} && git push origin".format(
        website_location=website_location))
Example 51
 def test_error_includes_stderr_if_given_and_hidden(self):
     """
     error() correctly prints stderr if it was previously hidden
     """
     # Mostly to catch regression bug(s)
     stderr = "this is my stderr"
     with hide('stderr'):
         error("error message", func=utils.abort, stderr=stderr)
     assert_contains(stderr, sys.stderr.getvalue())
Example 52
def remove_userspace():
    """
    Deletes (!) the SymPy changes. Use with great care.

    This should be run between runs to reset everything.
    """
    run("rm -rf repos")
    if os.path.exists("release"):
        error("release directory already exists locally. Remove it to continue.")
Example 53
 def _setup():
     result = sudo('rmvirtualenv {}'.format(env.virtualenv_name))
     if result.succeeded:
         print(
             blue('removing {} virtualenv .....'.format(
                 env.virtualenv_name)))
         print(green('{} virtualenv removed.'.format(env.virtualenv_name)))
     else:
         error(result)
Example 54
 def build(self, tag, **kwargs):
     """
     Identical to :meth:`dockermap.client.base.DockerClientWrapper.build` with additional logging.
     """
     kwargs['raise_on_error'] = True
     self.push_log("Building image '{0}'.".format(tag))
     try:
         return super(DockerFabricClient, self).build(tag, **kwargs)
     except DockerStatusError as e:
         error(e.message)
Example 55
def _run_host_command(command, shell=True, pty=True, combine_stderr=True):
    """
    Run host wrapper command as root

    (Modified from fabric.operations._run_command to ignore prefixes,
    path(), cd(), and always use sudo.)
    """
    # Set up new var so original argument can be displayed verbatim later.
    given_command = command
    # Handle context manager modifications, and shell wrapping
    wrapped_command = _shell_wrap(
        command,
        shell,
        _sudo_prefix(None)
    )
    # Execute info line
    if output.debug:
        print("[%s] %s: %s" % (env.host_string, 'sudo', wrapped_command))
    elif output.running:
        print("[%s] %s: %s" % (env.host_string, 'sudo', given_command))

    # Actual execution, stdin/stdout/stderr handling, and termination
    stdout, stderr, status = _execute(default_channel(), wrapped_command, pty,
        combine_stderr)

    # Assemble output string
    out = _AttributeString(stdout)
    err = _AttributeString(stderr)

    # Error handling
    out.failed = False
    if status != 0:
        out.failed = True
        msg = "%s() received nonzero return code %s while executing" % (
            'sudo', status
        )
        if env.warn_only:
            msg += " '%s'!" % given_command
        else:
            msg += "!\n\nRequested: %s\nExecuted: %s" % (
                given_command, wrapped_command
            )
        error(message=msg, stdout=out, stderr=err)

    # Attach return code to output string so users who have set things to
    # warn only, can inspect the error code.
    out.return_code = status

    # Convenience mirror of .failed
    out.succeeded = not out.failed

    # Attach stderr for anyone interested in that.
    out.stderr = err

    return out