Ejemplo n.º 1
0
def su(request):
    """Assume the identity of another user (admin-only).

    Strictly a debugging aid: an admin takes over the session of the
    user identified by the `email` request parameter. Logging out
    restores the admin's own session, and the impersonated user's last
    login time is untouched. An email is sent to the ops team whenever
    this is used, for security auditing.
    """
    # SEC only admins may impersonate; raises if user is not an admin
    admin = user_from_request(request, admin=True)

    if isinstance(request.environ['session'], ApiToken):
        raise ForbiddenError('Cannot do su when authenticated with api token')

    target_email = params_from_request(request).get('email')
    if not target_email:
        raise RequiredParameterMissingError('email')
    try:
        target = User.objects.get(email=target_email)
    except (UserNotFoundError, User.DoesNotExist):
        raise UserUnauthorizedError()
    reissue_cookie_session(request, admin.email, su=target.id)

    # notify the ops team of the impersonation
    send_email("Some admin used su",
               "Admin: %s\nUser: %s\nServer: %s" % (admin.email, target.email,
                                                    config.CORE_URI),
               config.NOTIFICATION_EMAIL['ops'])
    return HTTPFound('/')
Ejemplo n.º 2
0
    def resume_machine(self, machine):
        """Resume machine

        The param `machine` must be an instance of a machine model of this
        cloud.

        Note that the usual way to resume a machine would be to run

            machine.ctl.resume()

        which would in turn call this method, so that its cloud can customize
        it as needed.

        If a subclass of this controller wishes to override the way machines
        are resumed, it should override `_resume_machine` method instead.

        """
        # The machine must belong to the cloud this controller manages.
        assert self.cloud == machine.cloud
        if not machine.actions.resume:
            raise ForbiddenError("Machine doesn't support resume.")
        log.debug("Resuming machine %s", machine)

        machine_libcloud = self._get_machine_libcloud(machine)
        try:
            self._resume_machine(machine, machine_libcloud)
        except MistError:
            # Already-typed application errors propagate unchanged.
            log.error("Could not resume machine %s", machine)
            raise
        except Exception as exc:
            # Unexpected failures are logged with traceback and wrapped.
            log.exception(exc)
            raise InternalServerError(exc=exc)
Ejemplo n.º 3
0
def find_metrics(machine):
    """Return the monitoring metrics available for `machine`."""
    if not machine.monitoring.hasmonitoring:
        raise ForbiddenError("Machine doesn't have monitoring enabled.")
    head_prefix = "%(head)s."
    raw_metrics = MultiHandler(machine.id).find_metrics()
    for entry in raw_metrics:
        # Strip the head prefix, but only when its sole/last occurrence
        # is anchored at position 0 (hence rfind, not startswith).
        if entry['alias'].rfind(head_prefix) == 0:
            entry['alias'] = entry['alias'][len(head_prefix):]
    cleaned = _clean_monitor_metrics(machine.owner, raw_metrics)
    for metric_id in cleaned:
        cleaned[metric_id]['id'] = metric_id

    # # complex custom metrics won't appear unless manually added
    # for metric_id in machine.monitoring.metrics:
        # metric = user.metrics.get(metric_id, Metric())
        # log.warning("find_metrics manually adding complex custom metrics!")
        # if "%(head)s" in metrid_id:
        #     metrics.append({
        #         'metric_id': metric_id,
        #         'name': metric.name,
        #         'unit': metric.unit,
        #         '_target': metric_id,
        #         'max_value': None,
        #         'min_value': None,
        #         'priority': -100,
        #     })

    return cleaned
Ejemplo n.º 4
0
    def resize_machine(self, machine, plan_id, kwargs):
        """Resize machine

        The param `machine` must be an instance of a machine model of this
        cloud.

        Note that the usual way to resize a machine would be to run

            machine.ctl.resize(plan_id)

        which would in turn call this method, so that its cloud can customize
        it as needed.

        If a subclass of this controller wishes to override the way machines
        are resized, it should override `_resize_machine` method instead.

        """
        # The machine must belong to the cloud this controller manages.
        # assert isinstance(machine.cloud, Machine)
        assert self.cloud == machine.cloud
        if not machine.actions.resize:
            raise ForbiddenError("Machine doesn't support resize.")
        log.debug("Resizing machine %s", machine)

        machine_libcloud = self._get_machine_libcloud(machine)
        try:
            self._resize_machine(machine, machine_libcloud, plan_id, kwargs)

            # TODO: For better separation of concerns, maybe trigger below
            # using an event?
            from mist.api.notifications.methods import (
                dismiss_scale_notifications)
            # TODO: Make sure user feedback is positive below!
            dismiss_scale_notifications(machine, feedback='POSITIVE')
        except Exception as exc:
            # NOTE(review): unlike the resume/destroy siblings, every error
            # here (including MistError) is surfaced as BadRequestError —
            # confirm this asymmetry is intentional.
            raise BadRequestError('Failed to resize node: %s' % exc)
Ejemplo n.º 5
0
def find_metrics(machine):
    """Fetch the machine's metrics from the TSFDB time-series store."""
    if not machine.monitoring.hasmonitoring:
        raise ForbiddenError("Machine doesn't have monitoring enabled.")
    url = "%s/v1/resources/%s" % (config.TSFDB_URI, machine.id)
    try:
        resp = requests.get(url,
                            headers={'x-org-id': machine.owner.id},
                            timeout=5)
    except Exception as exc:
        log.error('Got %r on find_metrics for resource %s' % (exc, machine.id))
        raise ServiceUnavailableError()

    if not resp.ok:
        log.error('Got %d on find_metrics: %s', resp.status_code, resp.content)
        raise ServiceUnavailableError()

    return resp.json().get("metrics", {})
Ejemplo n.º 6
0
def undeploy_plugin(request):
    """
    Tags: monitoring
    ---
    Undeploy a custom plugin/script from a machine

    READ permission required on cloud
    EDIT_CUSTOM_METRICS permission required on machine

    ---

    machine:
      in: path
      type: string
      required: true
      description: the UUID of the machine to undeploy the custom script from
    plugin:
      in: path
      type: string
      required: true
      description: the name of the custom plugin/script
    plugin_type:
      in: query
      type: string
      required: true
      description: the plugin's type, e.g. "python" for python scripts

    """
    ctx = auth_context_from_request(request)
    machine = _machine_from_matchdict(request)
    plugin_type = params_from_request(request).get('plugin_type')

    plugin_id = request.matchdict['plugin']

    # SEC check permission EDIT_CUSTOM_METRICS on machine
    ctx.check_perm('machine', 'edit_custom_metrics', machine.id)

    if not machine.monitoring.hasmonitoring:
        raise ForbiddenError("Machine doesn't seem to have monitoring enabled")

    # Undeploy executable.
    # FIXME Is the following check really necessary?
    if plugin_type != 'python':
        raise BadRequestError('Invalid plugin_type')
    return mist.api.monitoring.methods.undeploy_python_plugin(machine,
                                                              plugin_id)
Ejemplo n.º 7
0
    def destroy_machine(self, machine):
        """Destroy machine

        The param `machine` must be an instance of a machine model of this
        cloud.

        Note that the usual way to destroy a machine would be to run

            machine.ctl.destroy()

        which would in turn call this method, so that its cloud can customize
        it as needed.

        If a subclass of this controller wishes to override the way machines
        are destroyed, it should override `_destroy_machine` method instead.

        """
        # The machine must belong to the cloud this controller manages.
        assert self.cloud == machine.cloud
        if not machine.actions.destroy:
            raise ForbiddenError("Machine doesn't support destroy.")
        log.debug("Destroying machine %s", machine)

        machine_libcloud = self._get_machine_libcloud(machine)
        try:
            self._destroy_machine(machine, machine_libcloud)
        except MistError:
            # Already-typed application errors propagate unchanged.
            log.error("Could not destroy machine %s", machine)
            raise
        except Exception as exc:
            # Unexpected failures are logged with traceback and wrapped.
            log.exception(exc)
            raise InternalServerError(exc=exc)

        # Drop all key associations and mark the model terminated.
        while machine.key_associations:
            machine.key_associations.pop()
        machine.state = 'terminated'
        machine.save()
Ejemplo n.º 8
0
def create_token(request):
    """
    Tags: api_tokens
    ---
    Creates a new api token.
    Used so that a user can send his credentials and produce a new api token.
    The api token itself will be returned in a json document along with it's
    id and it's name.
    If user has used su then he should provide his own credentials.However, the
    api token will authenticate the user he is impersonating.
    If name is not sent then a random one with the format api_token_xyz where
    xyz is a number will be produced.
    If the user provides a name then there must be no other token for that user
    with the same name.
    If the user has a cookie or sends an api token in the request headers then
    the username and password must belong to him.
    Used by io to authenticate to core (when running separately. Io sends
    user's email and password. We return an access token that will be used to
    authenticate any further communications.
    An anti-CSRF token is not needed to access this api call.
    If user is coming from oauth then he will be able to create a new token
    without a password provided he is authenticated somehow.
    If you are using the /auth route please switch to /api_v1_tokens route. The
    /auth route is deprecated.
    ---
    email:
      description: User's email
      type: string
      required: true
    password:
      description: User's password
      type: string
      required: true
    name:
      description: Api token name
      type: string
    ttl:
      description: Time to live for the token
      type: integer
    org_id:
      description: Org id if the token will be used in organizational context
      type: string
    """

    params = params_from_request(request)
    email = params.get('email', '').lower()
    password = params.get('password', '')
    api_token_name = params.get('name', '')
    org_id = params.get('org_id', '')
    ttl = params.get('ttl', 60 * 60)
    # Python 3: `basestring` no longer exists; a ttl arriving as a string
    # must be made of digits before the int() conversion below.
    if isinstance(ttl, str) and not ttl.isdigit():
        raise BadRequestError('Ttl must be a number greater than 0')
    ttl = int(ttl)
    if ttl < 0:
        raise BadRequestError('Ttl must be greater or equal to zero')
    if not password:
        raise RequiredParameterMissingError('password')

    try:
        auth_context = auth_context_from_request(request)
        user, org = auth_context.user, auth_context.org
    except UserUnauthorizedError:
        # Unauthenticated request: resolve user/org from params instead.
        # The following should apply, but currently it can't due to tests.
        # if not org_id:
            # raise RequiredParameterMissingError("No org_id provided")
        if not email:
            raise RequiredParameterMissingError("No email provided")
        org = None
        if org_id:
            # org_id may be either the org's id or its name.
            try:
                org = Organization.objects.get(id=org_id)
            except Organization.DoesNotExist:
                try:
                    org = Organization.objects.get(name=org_id)
                except Organization.DoesNotExist:
                    # The following should apply, but currently it can't due to
                    # tests.
                    # raise UserUnauthorizedError()
                    pass
        try:
            user = User.objects.get(email=email)
        except User.DoesNotExist:
            raise UserUnauthorizedError()
        # Remove org is not None when we enforce org context on tokens.
        if org is not None and user not in org.members:
            raise ForbiddenError()

    if user.status != 'confirmed':
        raise UserUnauthorizedError()
    if not user.password:
        raise BadRequestError('Please use the GUI to set a password and retry')
    if not user.check_password(password):
        raise UserUnauthorizedError('Wrong password')

    if not org:
        org = reissue_cookie_session(request, user.id).org
    # first check if the api token name is unique if it has been provided
    # otherwise produce a new one.
    if api_token_name:
        # will raise exception if there exists valid token with given name
        token_with_name_not_exists(user, api_token_name)
    else:
        api_token_name = get_random_name_for_token(user)
    tokens_num = len([token for token in ApiToken.objects(user_id=user.id,
                                                          revoked=False)
                      if token.is_valid()])
    if tokens_num < config.ACTIVE_APITOKEN_NUM:
        new_api_token = ApiToken()
        new_api_token.name = api_token_name
        new_api_token.org = org
        new_api_token.ttl = ttl
        new_api_token.set_user(user)
        new_api_token.ip_address = ip_from_request(request)
        new_api_token.user_agent = request.user_agent
        new_api_token.save()
    else:
        raise BadRequestError("MAX number of %s active tokens reached"
                              % config.ACTIVE_APITOKEN_NUM)

    token_view = new_api_token.get_public_view()
    token_view['last_accessed_at'] = 'Never'
    token_view['token'] = new_api_token.token

    return token_view
Ejemplo n.º 9
0
def create_machine(request):
    """
    Tags: machines
    ---
    Creates one or more machines on the specified cloud. If async is true, a
    jobId will be returned.
    READ permission required on cloud.
    CREATE_RESOURCES permission required on cloud.
    READ permission required on location.
    CREATE_RESOURCES permission required on location.
    CREATE permission required on machine.
    RUN permission required on script.
    READ permission required on key.
    ---
    cloud:
      in: path
      required: true
      type: string
    name:
      type: string
      description: Name of the machine
      required: true
      example: "my-digital-ocean-machine"
    image:
      description: Provider's image id to be used on creation
      required: true
      type: string
      example: "17384153"
    size:
      type: string
      description: Provider's size id to be used on creation
      example: "512mb"
    location:
      type: string
      description: Mist internal location id
      example: "3462b4dfbb434986a7dac362789bc402"
    key:
      description: Associate machine with this key. Mist internal key id
      type: string
      example: "da1df7d0402043b9a9c786b100992888"
    monitoring:
      type: boolean
      description: Enable monitoring on the machine
      example: false
    async:
      description: Create machine asynchronously, returning a jobId
      type: boolean
      example: false
    cloud_init:
      description: Cloud Init script
      type: string
    networks:
      type: array
      items:
        type: string
    subnet_id:
      type: string
      description: Optional for EC2
    subnetwork:
      type: string
    image_extra:
      type: string
      description: Required for GCE and Linode
    schedule:
      type: object
    script:
      type: string
    script_id:
      type: string
      example: "e7ac65fb4b23453486778585616b2bb8"
    script_params:
      type: string
    plugins:
      type: array
      items:
        type: string
    post_script_id:
      type: string
    post_script_params:
      type: string
    associate_floating_ip:
      type: boolean
      description: Required for Openstack. Either 'true' or 'false'
    azure_port_bindings:
      type: string
      description: Required for Azure
    storage_account:
      type: string
      description: Required for Azure_arm.
    resource_group:
      type: string
      description: Required for Azure_arm.
    storage_account_type:
      type: string
      description: Required for Azure_arm
    machine_password:
      type: string
      description: Required for Azure_arm
    machine_username:
      type: string
      description: Required for Azure_arm
    bare_metal:
      description: Needed only by SoftLayer cloud
      type: boolean
    billing:
      description: Needed only by SoftLayer cloud
      type: string
      example: "hourly"
    boot:
      description: Required for OnApp
      type: boolean
    build:
      description: Required for OnApp
      type: boolean
    docker_command:
      type: string
    docker_env:
      type: array
      items:
        type: string
    docker_exposed_ports:
      type: object
    docker_port_bindings:
      type: object
    project_id:
      description: ' Needed only by Packet cloud'
      type: string
    softlayer_backend_vlan_id:
      description: 'Specify id of a backend(private) vlan'
      type: integer
    ssh_port:
      type: integer
      example: 22
    ip_addresses:
      type: array
      items:
        type:
          object
    security_group:
      type: string
      description: Machine will join this security group
    vnfs:
      description: Network Virtual Functions to configure in machine
      type: array
      items:
        type: string
    description:
      description: Description of machine. Only for GigG8 machines
      type: string
    """

    params = params_from_request(request)
    cloud_id = request.matchdict['cloud']
    for key in ('name', 'size'):
        if key not in params:
            raise RequiredParameterMissingError(key)

    key_id = params.get('key')
    machine_name = params['name']
    location_id = params.get('location', None)
    image_id = params.get('image')
    if not image_id:
        raise RequiredParameterMissingError("image")
    # this is used in libvirt
    disk_size = int(params.get('libvirt_disk_size', 4))
    disk_path = params.get('libvirt_disk_path', '')
    size = params.get('size', None)
    # deploy_script received as unicode, but ScriptDeployment wants str
    script = str(params.get('script', ''))
    # these are required only for Linode/GCE, passing them anyway
    image_extra = params.get('image_extra', None)
    disk = params.get('disk', None)
    image_name = params.get('image_name', None)
    size_name = params.get('size_name', None)
    location_name = params.get('location_name', None)
    ips = params.get('ips', None)
    monitoring = params.get('monitoring', False)
    storage_account = params.get('storage_account', '')
    storage_account_type = params.get('storage_account_type', '')
    machine_password = params.get('machine_password', '')
    machine_username = params.get('machine_username', '')
    resource_group = params.get('resource_group', '')
    volumes = params.get('volumes', [])
    if volumes and volumes[0].get('volume_id'):
        request.matchdict['volume'] = volumes[0].get('volume_id')
    networks = params.get('networks', [])
    if isinstance(networks, str):
        networks = [networks]
    subnet_id = params.get('subnet_id', '')
    subnetwork = params.get('subnetwork', None)
    ip_addresses = params.get('ip_addresses', [])
    docker_env = params.get('docker_env', [])
    docker_command = params.get('docker_command', None)
    script_id = params.get('script_id', '')
    script_params = params.get('script_params', '')
    post_script_id = params.get('post_script_id', '')
    post_script_params = params.get('post_script_params', '')
    run_async = params.get('async', False)
    quantity = params.get('quantity', 1)
    persist = params.get('persist', False)
    docker_port_bindings = params.get('docker_port_bindings', {})
    docker_exposed_ports = params.get('docker_exposed_ports', {})
    azure_port_bindings = params.get('azure_port_bindings', '')
    # hostname: if provided it will be attempted to assign a DNS name
    hostname = params.get('hostname', '')
    plugins = params.get('plugins')
    cloud_init = params.get('cloud_init', '')
    associate_floating_ip = params.get('associate_floating_ip', False)
    associate_floating_ip_subnet = params.get('attach_floating_ip_subnet',
                                              None)
    project_id = params.get('project', None)
    bare_metal = params.get('bare_metal', False)
    # bare_metal True creates a hardware server in SoftLayer,
    # while bare_metal False creates a virtual cloud server
    # hourly True is the default setting for SoftLayer hardware
    # servers, while False means the server has monthly pricing
    softlayer_backend_vlan_id = params.get('softlayer_backend_vlan_id', None)
    hourly = params.get('hourly', True)
    sec_group = params.get('security_group', '')
    vnfs = params.get('vnfs', [])
    expiration = params.get('expiration', {})
    description = params.get('description', '')
    folder = params.get('folders', None)
    datastore = params.get('datastore', None)
    job_id = params.get('job_id')
    # The `job` variable points to the event that started the job. If a job_id
    # is not provided, then it means that this is the beginning of a new story
    # that starts with a `create_machine` event. If a job_id is provided that
    # means that the current event will be part of already existing, unknown
    # story. TODO: Provide the `job` in the request's params or query it.
    if not job_id:
        job = 'create_machine'
        job_id = uuid.uuid4().hex
    else:
        job = None

    auth_context = auth_context_from_request(request)

    try:
        cloud = Cloud.objects.get(owner=auth_context.owner,
                                  id=cloud_id,
                                  deleted=None)
    except Cloud.DoesNotExist:
        raise NotFoundError('Cloud does not exist')

    # FIXME For backwards compatibility.
    if cloud.ctl.provider in (
            'vsphere',
            'onapp',
            'libvirt',
    ):
        if not size or not isinstance(size, dict):
            size = {}
        for param in (
                'size_ram',
                'size_cpu',
                'size_disk_primary',
                'size_disk_swap',
                'boot',
                'build',
                'cpu_priority',
                'cpu_sockets',
                'cpu_threads',
                'port_speed',
                'hypervisor_group_id',
        ):
            if param in params and params[param]:
                size[param.replace('size_', '')] = params[param]

    # compose schedule as a dict from relative parameters
    if not params.get('schedule_type'):
        schedule = {}
    else:
        if params.get('schedule_type') not in [
                'crontab', 'interval', 'one_off'
        ]:
            raise BadRequestError('schedule type must be one of '
                                  'these (crontab, interval, one_off)]')
        if params.get('schedule_entry') == {}:
            raise RequiredParameterMissingError('schedule_entry')

        schedule = {
            'name': params.get('name'),
            'description': params.get('description', ''),
            'action': params.get('action', ''),
            'script_id': params.get('schedule_script_id', ''),
            'schedule_type': params.get('schedule_type'),
            'schedule_entry': params.get('schedule_entry'),
            'expires': params.get('expires', ''),
            'start_after': params.get('start_after', ''),
            'max_run_count': params.get('max_run_count'),
            'task_enabled': bool(params.get('task_enabled', True)),
            'auth_context': auth_context.serialize(),
        }

    # SEC permission checks on cloud, location, machine, script and key.
    auth_context.check_perm("cloud", "read", cloud_id)
    auth_context.check_perm("cloud", "create_resources", cloud_id)
    if location_id:
        auth_context.check_perm("location", "read", location_id)
        auth_context.check_perm("location", "create_resources", location_id)

    tags, constraints = auth_context.check_perm("machine", "create", None)
    if script_id:
        auth_context.check_perm("script", "run", script_id)
    if key_id:
        auth_context.check_perm("key", "read", key_id)

    # Parse tags.
    try:
        mtags = params.get('tags') or {}
        if not isinstance(mtags, dict):
            if not isinstance(mtags, list):
                raise ValueError()
            # `is 1` compared identity, not value; use equality.
            if not all((isinstance(t, dict) and len(t) == 1 for t in mtags)):
                raise ValueError()
            mtags = {
                key: val
                for item in mtags for key, val in list(item.items())
            }
        security_tags = auth_context.get_security_tags()
        for mt in mtags:
            if mt in security_tags:
                raise ForbiddenError(
                    'You may not assign tags included in a Team access policy:'
                    ' `%s`' % mt)
        tags.update(mtags)
    except ValueError:
        raise BadRequestError('Invalid tags format. Expecting either a '
                              'dictionary of tags or a list of single-item '
                              'dictionaries')

    # check expiration constraint
    exp_constraint = constraints.get('expiration', {})
    if exp_constraint:
        try:
            from mist.rbac.methods import check_expiration
            check_expiration(expiration, exp_constraint)
        except ImportError:
            pass

    # check cost constraint
    cost_constraint = constraints.get('cost', {})
    if cost_constraint:
        try:
            from mist.rbac.methods import check_cost
            check_cost(auth_context.org, cost_constraint)
        except ImportError:
            pass

    args = (cloud_id, key_id, machine_name, location_id, image_id, size,
            image_extra, disk, image_name, size_name, location_name, ips,
            monitoring, storage_account, machine_password, resource_group,
            storage_account_type, networks, subnetwork, docker_env,
            docker_command)
    kwargs = {
        'script_id': script_id,
        'script_params': script_params,
        'script': script,
        'job': job,
        'job_id': job_id,
        'docker_port_bindings': docker_port_bindings,
        'docker_exposed_ports': docker_exposed_ports,
        'azure_port_bindings': azure_port_bindings,
        'hostname': hostname,
        'plugins': plugins,
        'post_script_id': post_script_id,
        'post_script_params': post_script_params,
        'disk_size': disk_size,
        'disk_path': disk_path,
        'cloud_init': cloud_init,
        'subnet_id': subnet_id,
        'associate_floating_ip': associate_floating_ip,
        'associate_floating_ip_subnet': associate_floating_ip_subnet,
        'project_id': project_id,
        'bare_metal': bare_metal,
        'tags': tags,
        'hourly': hourly,
        'schedule': schedule,
        'softlayer_backend_vlan_id': softlayer_backend_vlan_id,
        'machine_username': machine_username,
        'volumes': volumes,
        'ip_addresses': ip_addresses,
        'vnfs': vnfs,
        'expiration': expiration,
        'folder': folder,
        'datastore': datastore,
        'ephemeral': params.get('ephemeral', False),
        'lxd_image_source': params.get('lxd_image_source', None),
        'sec_group': sec_group,
        'description': description
    }

    if not run_async:
        ret = methods.create_machine(auth_context, *args, **kwargs)
    else:
        args = (auth_context.serialize(), ) + args
        kwargs.update({'quantity': quantity, 'persist': persist})
        tasks.create_machine_async.apply_async(args, kwargs, countdown=2)
        ret = {'job_id': job_id}
    ret.update({'job': job})
    return ret
Ejemplo n.º 10
0
def deploy_plugin(request):
    """
    Tags: monitoring
    ---
    Deploy a custom plugin on a machine

    Adds a scripts, which is then deployed on the specified machine to collect
    custom metrics.

    READ permission required on cloud.
    EDIT_CUSTOM_METRICS permission required on machine.

    ---

    machine:
      in: path
      type: string
      required: true
      description: the UUID of the machine on which to deploy the custom script
    plugin:
      in: path
      type: string
      required: true
      description: the name of the custom plugin/script
    plugin_type:
      in: query
      type: string
      required: true
      description: the plugin's type, e.g. "python" for python scripts
    read_function:
      in: query
      type: string
      required: false
      description: the source code of the custom plugin/script
    value_type:
      in: query
      type: string
      default: gauge
      required: false
      description: the type of the computed value
    name:
      in: query
      type: string
      required: false
      description: the name of the resulted associated metric
    unit:
      in: query
      type: string
      required: false
      description: the unit of the resulted associated metric, e.g. "bytes"

    """
    ctx = auth_context_from_request(request)
    machine = _machine_from_matchdict(request)
    params = params_from_request(request)

    plugin_name = request.matchdict['plugin']

    # SEC check permission EDIT_CUSTOM_METRICS on machine
    ctx.check_perm('machine', 'edit_custom_metrics', machine.id)

    if not machine.monitoring.hasmonitoring:
        raise ForbiddenError("Machine doesn't seem to have monitoring enabled")

    # Assemble the script's metadata and source.
    metric_extra = {
        'value_type': params.get('value_type', 'gauge'),
        'value_unit': params.get('unit', ''),
        'value_name': params.get('name', ''),
    }
    script_kwargs = {
        'location_type': 'inline',
        'extra': metric_extra,
        'script': params.get('read_function'),
        'description': 'python plugin'
    }

    # FIXME Telegraf can load any sort of executable, not just python scripts.
    if params.get('plugin_type') != 'python':
        raise BadRequestError('Invalid plugin_type')
    # Add the script, then deploy it on the machine.
    script = TelegrafScript.add(ctx.owner, plugin_name, **script_kwargs)
    return script.ctl.deploy_and_assoc_python_plugin_from_script(machine)
Ejemplo n.º 11
0
def machine_console(request):
    """
    Tags: machines
    ---
    Open VNC console.
    Generate and return an URI to open a VNC console to target machine
    READ permission required on cloud.
    READ permission required on machine.
    ---
    cloud:
      in: path
      required: true
      type: string
    machine:
      in: path
      required: true
      type: string
    rdp_port:
      default: 3389
      in: query
      required: true
      type: integer
    host:
      in: query
      required: true
      type: string
    """
    cloud_id = request.matchdict.get('cloud')

    auth_context = auth_context_from_request(request)

    # The machine may be addressed either by (cloud, external machine_id)
    # or directly by its internal UUID.
    if cloud_id:
        machine_id = request.matchdict['machine']
        auth_context.check_perm("cloud", "read", cloud_id)
        try:
            machine = Machine.objects.get(cloud=cloud_id,
                                          machine_id=machine_id,
                                          state__ne='terminated')
            # used by logging_view_decorator
            request.environ['machine_uuid'] = machine.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_id)
    else:
        machine_uuid = request.matchdict['machine_uuid']
        try:
            machine = Machine.objects.get(id=machine_uuid,
                                          state__ne='terminated')
            # used by logging_view_decorator
            request.environ['machine_id'] = machine.machine_id
            request.environ['cloud_id'] = machine.cloud.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_uuid)

        cloud_id = machine.cloud.id
        auth_context.check_perm("cloud", "read", cloud_id)

    auth_context.check_perm("machine", "read", machine.id)

    if machine.cloud.ctl.provider not in ['vsphere', 'openstack', 'libvirt']:
        raise MistNotImplementedError(
            "VNC console only supported for vSphere, OpenStack or KVM")

    if machine.cloud.ctl.provider == 'libvirt':
        import xml.etree.ElementTree as ET
        from html import unescape
        from datetime import datetime
        import hmac
        import hashlib
        # Locate the VNC graphics device in the domain's XML definition.
        xml_desc = unescape(machine.extra.get('xml_description', ''))
        root = ET.fromstring(xml_desc)
        vnc_element = root.find('devices').find('graphics[@type="vnc"]')
        # NOTE: must compare against None — an ElementTree Element with no
        # children is falsy, so `if not vnc_element` would wrongly reject
        # an existing, childless <graphics> element.
        if vnc_element is None:
            raise MethodNotAllowedError(
                "VNC console not supported by this KVM domain")
        vnc_port = vnc_element.attrib.get('port')
        vnc_host = vnc_element.attrib.get('listen')
        from mongoengine import Q
        # Get key associations, prefer root or sudoer ones
        key_associations = KeyMachineAssociation.objects(
            Q(machine=machine.parent) & (Q(ssh_user='******') | Q(sudo=True))) \
            or KeyMachineAssociation.objects(machine=machine.parent)
        if not key_associations:
            raise ForbiddenError()
        key_id = key_associations[0].key.id
        host = '%s@%s:%d' % (key_associations[0].ssh_user,
                             machine.parent.hostname, key_associations[0].port)
        # Short-lived (100s) HMAC-signed proxy URL through the KVM host.
        expiry = int(datetime.now().timestamp()) + 100
        msg = '%s,%s,%s,%s,%s' % (host, key_id, vnc_host, vnc_port, expiry)
        mac = hmac.new(config.SECRET.encode(),
                       msg=msg.encode(),
                       digestmod=hashlib.sha256).hexdigest()
        base_ws_uri = config.CORE_URI.replace('http', 'ws')
        proxy_uri = '%s/proxy/%s/%s/%s/%s/%s/%s' % (
            base_ws_uri, host, key_id, vnc_host, vnc_port, expiry, mac)
        return render_to_response('../templates/novnc.pt', {'url': proxy_uri})
    if machine.cloud.ctl.provider == 'vsphere':
        console_uri = machine.cloud.ctl.compute.connection.ex_open_console(
            machine.machine_id)
        protocol, host = config.CORE_URI.split('://')
        protocol = protocol.replace('http', 'ws')
        params = urllib.parse.urlencode({'url': console_uri})
        proxy_uri = f"{protocol}://{host}/wsproxy/?{params}"
        return render_to_response('../templates/novnc.pt', {'url': proxy_uri})
    else:
        # OpenStack: redirect the browser straight to the console URL.
        console_url = machine.cloud.ctl.compute.connection.ex_open_console(
            machine.machine_id)
    raise RedirectError(console_url)