Example #1
def edit_machine(request):
    """
    Tags: machines
    ---
    Edits a machine.
    For now, only expiration-related attributes can be edited.
    READ permission required on cloud.
    EDIT permission required on machine.
    ---
    expiration:
      type: object
      properties:
        date:
          type: string
          description: format should be YYYY-MM-DD HH:MM:SS
        action:
          type: string
          description: one of ['stop', 'destroy']
        notify:
          type: integer
          description: seconds before the expiration date to be notified
    """
    cloud_id = request.matchdict.get('cloud')
    params = params_from_request(request)
    auth_context = auth_context_from_request(request)

    if cloud_id:
        machine_id = request.matchdict['machine']
        auth_context.check_perm("cloud", "read", cloud_id)
        try:
            machine = Machine.objects.get(cloud=cloud_id,
                                          machine_id=machine_id,
                                          state__ne='terminated')
            # used by logging_view_decorator
            request.environ['machine_uuid'] = machine.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_id)
    else:
        machine_uuid = request.matchdict['machine_uuid']
        try:
            machine = Machine.objects.get(id=machine_uuid)
            # VMs in libvirt can be started no matter if they are terminated
            if machine.state == 'terminated' and not isinstance(
                    machine.cloud, LibvirtCloud):
                raise NotFoundError("Machine %s has been terminated" %
                                    machine_uuid)
            # used by logging_view_decorator
            request.environ['machine_id'] = machine.machine_id
            request.environ['cloud_id'] = machine.cloud.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_uuid)

        cloud_id = machine.cloud.id
        auth_context.check_perm("cloud", "read", cloud_id)

    if machine.cloud.owner != auth_context.owner:
        raise NotFoundError("Machine %s doesn't exist" % machine.id)

    tags, constraints = auth_context.check_perm("machine", "edit", machine.id)
    expiration = params.get('expiration', {})
    # check expiration constraint
    exp_constraint = constraints.get('expiration', {})
    if exp_constraint:
        try:
            from mist.rbac.methods import check_expiration
            check_expiration(expiration, exp_constraint)
        except ImportError:
            pass

    return machine.ctl.update(auth_context, params)
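
A minimal sketch of a request body this view could accept, based on the expiration schema documented above; the values are illustrative.

# Illustrative payload for edit_machine; keys follow the docstring schema above.
params = {
    'expiration': {
        'date': '2024-12-31 23:59:59',  # YYYY-MM-DD HH:MM:SS
        'action': 'stop',               # one of ['stop', 'destroy']
        'notify': 3600,                 # notify one hour before expiration
    }
}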
Example #2
def create_machine(request):
    """
    Tags: machines
    ---
    Creates one or more machines on the specified cloud. If async is true, a
    jobId will be returned.
    READ permission required on cloud.
    CREATE_RESOURCES permission required on cloud.
    READ permission required on location.
    CREATE_RESOURCES permission required on location.
    CREATE permission required on machine.
    RUN permission required on script.
    READ permission required on key.
    ---
    cloud:
      in: path
      required: true
      type: string
    name:
      type: string
      description: Name of the machine
      required: true
      example: "my-digital-ocean-machine"
    image:
      description: Provider's image id
      required: true
      type: string
      example: "17384153"
    size:
      type: string
      description: Mist internal size id
      example: "9417745961a84bffbf6419e5of68faa5"
    location:
      type: string
      description: Mist internal location id
      example: "3462b4dfbb434986a7dac362789bc402"
    key:
      description: Associate machine with this key. Mist internal key id
      type: string
      example: "da1df7d0402043b9a9c786b100992888"
    monitoring:
      type: boolean
      description: Enable monitoring on the machine
      example: false
    async:
      description: Create machine asynchronously, returning a jobId
      type: boolean
      example: false
    cloud_init:
      description: Cloud Init script
      type: string
    networks:
      type: array
      items:
        type: string
    subnet_id:
      type: string
      description: Optional for EC2
    subnetwork:
      type: string
    image_extra:
      type: string
      description: Required for GCE, Linode and vSphere 6.7
    schedule:
      type: object
    script:
      type: string
    script_id:
      type: string
      example: "e7ac65fb4b23453486778585616b2bb8"
    script_params:
      type: string
    plugins:
      type: array
      items:
        type: string
    post_script_id:
      type: string
    post_script_params:
      type: string
    associate_floating_ip:
      type: boolean
      description: Required for OpenStack. Either 'true' or 'false'
    azure_port_bindings:
      type: string
      description: Required for Azure
    storage_account:
      type: string
      description: Required for Azure_arm.
    resource_group:
      type: string
      description: Required for Azure_arm.
    storage_account_type:
      type: string
      description: Required for Azure_arm
    machine_password:
      type: string
      description: Required for Azure_arm
    machine_username:
      type: string
      description: Required for Azure_arm
    bare_metal:
      description: Needed only by SoftLayer cloud
      type: boolean
    billing:
      description: Needed only by SoftLayer cloud
      type: string
      example: "hourly"
    boot:
      description: Required for OnApp
      type: boolean
    build:
      description: Required for OnApp
      type: boolean
    docker_command:
      type: string
    docker_env:
      type: array
      items:
        type: string
    docker_exposed_ports:
      type: object
    docker_port_bindings:
      type: object
    project_id:
      description: ' Needed only by Packet cloud'
      type: string
    softlayer_backend_vlan_id:
      description: Specify the id of a backend (private) VLAN
      type: integer
    ssh_port:
      type: integer
      example: 22
    ip_addresses:
      type: array
      items:
        type: object
    security_group:
      type: string
      description: Machine will join this security group
    vnfs:
      description: Network Virtual Functions to configure in machine
      type: array
      items:
        type: string
    description:
      description: Description of the machine. Only for GigG8 machines
      type: string
    """

    params = params_from_request(request)
    cloud_id = request.matchdict['cloud']
    for key in ('name', 'size'):
        if key not in params:
            raise RequiredParameterMissingError(key)

    key_id = params.get('key')
    machine_name = params['name']
    location_id = params.get('location', None)
    image_id = params.get('image')
    if not image_id:
        raise RequiredParameterMissingError("image")
    # this is used in libvirt
    disk_size = int(params.get('libvirt_disk_size', 4))
    disk_path = params.get('libvirt_disk_path', '')
    size = params.get('size', None)
    # deploy_script received as unicode, but ScriptDeployment wants str
    script = str(params.get('script', ''))
    # these are required only for Linode/GCE, passing them anyway
    image_extra = params.get('image_extra', None)
    disk = params.get('disk', None)
    image_name = params.get('image_name', None)
    size_name = params.get('size_name', None)
    location_name = params.get('location_name', None)
    ips = params.get('ips', None)
    monitoring = params.get('monitoring', False)
    storage_account = params.get('storage_account', '')
    storage_account_type = params.get('storage_account_type', '')
    machine_password = params.get('machine_password', '')
    machine_username = params.get('machine_username', '')
    resource_group = params.get('resource_group', '')
    volumes = params.get('volumes', [])
    if volumes and volumes[0].get('volume_id'):
        request.matchdict['volume'] = volumes[0].get('volume_id')
    networks = params.get('networks', [])
    if isinstance(networks, str):
        networks = [networks]
    subnet_id = params.get('subnet_id', '')
    subnetwork = params.get('subnetwork', None)
    ip_addresses = params.get('ip_addresses', [])
    docker_env = params.get('docker_env', [])
    docker_command = params.get('docker_command', None)
    script_id = params.get('script_id', '')
    script_params = params.get('script_params', '')
    post_script_id = params.get('post_script_id', '')
    post_script_params = params.get('post_script_params', '')
    run_async = params.get('async', False)
    quantity = params.get('quantity', 1)
    persist = params.get('persist', False)
    docker_port_bindings = params.get('docker_port_bindings', {})
    docker_exposed_ports = params.get('docker_exposed_ports', {})
    azure_port_bindings = params.get('azure_port_bindings', '')
    # hostname: if provided it will be attempted to assign a DNS name
    hostname = params.get('hostname', '')
    plugins = params.get('plugins')
    cloud_init = params.get('cloud_init', '')
    associate_floating_ip = params.get('associate_floating_ip', False)
    associate_floating_ip_subnet = params.get('attach_floating_ip_subnet',
                                              None)
    project_id = params.get('project', None)
    bare_metal = params.get('bare_metal', False)
    # bare_metal True creates a hardware server in SoftLayer,
    # while bare_metal False creates a virtual cloud server.
    # hourly True is the default setting for SoftLayer hardware
    # servers, while False means the server has monthly pricing.
    softlayer_backend_vlan_id = params.get('softlayer_backend_vlan_id', None)
    hourly = params.get('hourly', True)
    sec_group = params.get('security_group', '')
    vnfs = params.get('vnfs', [])
    expiration = params.get('expiration', {})
    description = params.get('description', '')
    folder = params.get('folders', None)
    datastore = params.get('datastore', None)
    job_id = params.get('job_id')
    # The `job` variable points to the event that started the job. If a job_id
    # is not provided, then it means that this is the beginning of a new story
    # that starts with a `create_machine` event. If a job_id is provided that
    # means that the current event will be part of already existing, unknown
    # story. TODO: Provide the `job` in the request's params or query it.
    if not job_id:
        job = 'create_machine'
        job_id = uuid.uuid4().hex
    else:
        job = None

    auth_context = auth_context_from_request(request)

    try:
        cloud = Cloud.objects.get(owner=auth_context.owner,
                                  id=cloud_id,
                                  deleted=None)
    except Cloud.DoesNotExist:
        raise NotFoundError('Cloud does not exist')

    # compose schedule as a dict from relative parameters
    if not params.get('schedule_type'):
        schedule = {}
    else:
        if params.get('schedule_type') not in [
                'crontab', 'interval', 'one_off'
        ]:
            raise BadRequestError('schedule type must be one of '
                                  'these: (crontab, interval, one_off)')
        if params.get('schedule_entry') == {}:
            raise RequiredParameterMissingError('schedule_entry')

        schedule = {
            'name': params.get('name'),
            'description': params.get('description', ''),
            'action': params.get('action', ''),
            'script_id': params.get('schedule_script_id', ''),
            'schedule_type': params.get('schedule_type'),
            'schedule_entry': params.get('schedule_entry'),
            'expires': params.get('expires', ''),
            'start_after': params.get('start_after', ''),
            'max_run_count': params.get('max_run_count'),
            'task_enabled': bool(params.get('task_enabled', True)),
            'auth_context': auth_context.serialize(),
        }

    auth_context.check_perm("cloud", "read", cloud_id)
    auth_context.check_perm("cloud", "create_resources", cloud_id)
    if location_id:
        auth_context.check_perm("location", "read", location_id)
        auth_context.check_perm("location", "create_resources", location_id)

    tags, constraints = auth_context.check_perm("machine", "create", None)
    if script_id:
        auth_context.check_perm("script", "run", script_id)
    if key_id:
        auth_context.check_perm("key", "read", key_id)

    # Parse tags.
    try:
        mtags = params.get('tags') or {}
        if not isinstance(mtags, dict):
            if not isinstance(mtags, list):
                raise ValueError()
            if not all((isinstance(t, dict) and len(t) == 1 for t in mtags)):
                raise ValueError()
            mtags = {
                key: val
                for item in mtags for key, val in list(item.items())
            }
        security_tags = auth_context.get_security_tags()
        for mt in mtags:
            if mt in security_tags:
                raise ForbiddenError(
                    'You may not assign tags included in a Team access policy:'
                    ' `%s`' % mt)
        tags.update(mtags)
    except ValueError:
        raise BadRequestError('Invalid tags format. Expecting either a '
                              'dictionary of tags or a list of single-item '
                              'dictionaries')

    # check expiration constraint
    exp_constraint = constraints.get('expiration', {})
    if exp_constraint:
        try:
            from mist.rbac.methods import check_expiration
            check_expiration(expiration, exp_constraint)
        except ImportError:
            pass

    # check cost constraint
    cost_constraint = constraints.get('cost', {})
    if cost_constraint:
        try:
            from mist.rbac.methods import check_cost
            check_cost(auth_context.org, cost_constraint)
        except ImportError:
            pass

    args = (cloud_id, key_id, machine_name, location_id, image_id, size,
            image_extra, disk, image_name, size_name, location_name, ips,
            monitoring, storage_account, machine_password, resource_group,
            storage_account_type, networks, subnetwork, docker_env,
            docker_command)
    kwargs = {
        'script_id': script_id,
        'script_params': script_params,
        'script': script,
        'job': job,
        'job_id': job_id,
        'docker_port_bindings': docker_port_bindings,
        'docker_exposed_ports': docker_exposed_ports,
        'azure_port_bindings': azure_port_bindings,
        'hostname': hostname,
        'plugins': plugins,
        'post_script_id': post_script_id,
        'post_script_params': post_script_params,
        'disk_size': disk_size,
        'disk_path': disk_path,
        'cloud_init': cloud_init,
        'subnet_id': subnet_id,
        'associate_floating_ip': associate_floating_ip,
        'associate_floating_ip_subnet': associate_floating_ip_subnet,
        'project_id': project_id,
        'bare_metal': bare_metal,
        'tags': tags,
        'hourly': hourly,
        'schedule': schedule,
        'softlayer_backend_vlan_id': softlayer_backend_vlan_id,
        'machine_username': machine_username,
        'volumes': volumes,
        'ip_addresses': ip_addresses,
        'vnfs': vnfs,
        'expiration': expiration,
        'folder': folder,
        'datastore': datastore,
        'ephemeral': params.get('ephemeral', False),
        'lxd_image_source': params.get('lxd_image_source', None),
        'sec_group': sec_group,
        'description': description
    }

    if not run_async:
        ret = methods.create_machine(auth_context, *args, **kwargs)
    else:
        args = (auth_context.serialize(), ) + args
        kwargs.update({'quantity': quantity, 'persist': persist})
        tasks.create_machine_async.apply_async(args, kwargs, countdown=2)
        ret = {'job_id': job_id}
    ret.update({'job': job})
    return ret
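
A hedged sketch of a minimal request body for this view, based on the required-parameter checks above (name, size, image); the ids reuse the docstring's own examples.

# Illustrative minimal payload for create_machine.
params = {
    'name': 'my-digital-ocean-machine',          # required
    'size': '9417745961a84bffbf6419e5of68faa5',  # required, Mist internal size id
    'image': '17384153',                         # required, provider image id
    'location': '3462b4dfbb434986a7dac362789bc402',
    'key': 'da1df7d0402043b9a9c786b100992888',
    'monitoring': False,
    'async': False,                              # True returns {'job_id': ...} instead
}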
Example #3
def add_machine(request):
    """
    Tags: machines
    ---
    Add a machine to an OtherServer/Libvirt Cloud.
    READ permission required on cloud.
    EDIT permission required on cloud.
    READ permission required on key.
    ---
    cloud:
      in: path
      required: true
      type: string
    machine_hostname:
      type: string
      required: true
    operating_system:
      type: string
    machine_name:
      type: string
    machine_key:
      type: string
    machine_user:
      type: string
    machine_port:
      type: string
    remote_desktop_port:
      type: string
    monitoring:
      type: boolean
    images_location:
      type: string
    """
    cloud_id = request.matchdict.get('cloud')

    auth_context = auth_context_from_request(request)

    try:
        cloud = Cloud.objects.get(owner=auth_context.owner,
                                  id=cloud_id,
                                  deleted=None)
    except Cloud.DoesNotExist:
        raise NotFoundError('Cloud does not exist')

    if cloud.ctl.provider not in ['libvirt', 'bare_metal']:
        raise MistNotImplementedError()

    params = params_from_request(request)
    machine_hostname = params.get('machine_hostname')
    if not machine_hostname:
        raise RequiredParameterMissingError("machine_hostname")

    operating_system = params.get('operating_system', '')
    machine_name = params.get('machine_name', '')
    machine_key = params.get('machine_key', '')
    machine_user = params.get('machine_user', '')
    machine_port = params.get('machine_port', 22)
    remote_desktop_port = params.get('remote_desktop_port', '')
    images_location = params.get('images_location', '')
    monitoring = params.get('monitoring', False)

    job_id = params.get('job_id')
    if not job_id:
        job = 'add_machine'
        job_id = uuid.uuid4().hex
    else:
        job = None

    auth_context.check_perm("cloud", "read", cloud_id)
    auth_context.check_perm("cloud", "edit", cloud_id)

    if machine_key:
        auth_context.check_perm("key", "read", machine_key)

    log.info('Adding host machine %s on cloud %s' % (machine_name, cloud_id))

    try:
        machine = cloud.ctl.add_machine(host=machine_hostname,
                                        ssh_user=machine_user,
                                        ssh_port=machine_port,
                                        ssh_key=machine_key,
                                        name=machine_name,
                                        os_type=operating_system,
                                        rdp_port=remote_desktop_port,
                                        images_location=images_location)
    except Exception as e:
        raise MachineCreationError("Adding host got exception %r" % e, exc=e)

    # Enable monitoring
    if monitoring:
        monitor = enable_monitoring(
            auth_context.owner,
            cloud.id,
            machine.machine_id,
            no_ssh=not (machine.os_type == 'unix'
                        and KeyMachineAssociation.objects(machine=machine)))

    ret = {
        'id': machine.id,
        'name': machine.name,
        'extra': {},
        'public_ips': machine.public_ips,
        'private_ips': machine.private_ips,
        'job_id': job_id,
        'job': job
    }

    if monitoring:
        ret.update({'monitoring': monitor})

    return ret
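
An illustrative request body for this view; machine_hostname is the only required field, and the key id is a placeholder.

# Illustrative payload for add_machine on a libvirt/bare_metal cloud.
params = {
    'machine_hostname': '203.0.113.10',  # required
    'machine_name': 'db-server-1',
    'machine_user': 'root',
    'machine_port': 22,
    'machine_key': '<mist key id>',      # placeholder; triggers a key read permission check
    'operating_system': 'unix',
    'monitoring': False,
}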
Example #4
def get_events(auth_context,
               owner_id='',
               user_id='',
               event_type='',
               action='',
               limit=0,
               start=0,
               stop=0,
               newest=True,
               error=None,
               **kwargs):
    """Fetch logged events.

    This generator yields a series of logs after querying Elasticsearch.

    The initial query is extended with additional terms based on the inputs
    provided. Also, extra filtering may be applied in order to perform RBAC
    on the fly given the permissions granted to the requesting User.

    All Elasticsearch indices are in the form of <app|ui>-logs-<date>.

    """
    # Restrict access to UI logs to Admins only.
    is_admin = auth_context and auth_context.user.role == 'Admin'

    # Attempt to enforce owner_id in case of non-Admins.
    if not is_admin and not owner_id:
        owner_id = auth_context.owner.id if auth_context else None

    # Construct base Elasticsearch query.
    index = "%s-logs-*" % ("*" if is_admin else "app")
    query = {
        "query": {
            "bool": {
                "filter": {
                    "bool": {
                        "must": [{
                            "range": {
                                "@timestamp": {
                                    "gte": int(start * 1000),
                                    "lte": int(stop * 1000) or "now"
                                }
                            }
                        }],
                        "must_not": []
                    }
                }
            }
        },
        "sort": [{
            "@timestamp": {
                "order": ("desc" if newest else "asc")
            }
        }],
        "size": (limit or 50)
    }
    # Match action.
    if action:
        query["query"]["bool"]["filter"]["bool"]["must"].append(
            {"term": {
                'action': action
            }})
    # Fetch logs corresponding to the current Organization.
    if owner_id:
        query["query"]["bool"]["filter"]["bool"]["must"].append(
            {"term": {
                "owner_id": owner_id
            }})
    # Match the user's ID, if provided.
    if user_id:
        query["query"]["bool"]["filter"]["bool"]["must"].append(
            {"term": {
                "user_id": user_id
            }})
    # Specify whether to fetch stories that ended with an error.
    if error:
        query["query"]["bool"]["filter"]["bool"]["must_not"].append(
            {"term": {
                "error": False
            }})
    elif error is False:
        query["query"]["bool"]["filter"]["bool"]["must"].append(
            {"term": {
                "error": False
            }})
    # Perform a complex "Query String" Query that may span fields.
    if 'filter' in kwargs:
        f = kwargs.pop('filter')
        query_string = {
            'query': f,
            'analyze_wildcard': True,
            'default_operator': 'and',
            'allow_leading_wildcard': False
        }
        query["query"]["bool"]["filter"]["bool"]["must"].append(
            {'query_string': query_string})

    # Extend query with additional kwargs.
    for key, value in kwargs.items():
        query["query"]["bool"]["filter"]["bool"]["must"].append(
            {"term": {
                key: value
            }})

    # Apply RBAC for non-Owners.
    if auth_context and not auth_context.is_owner():
        filter_logs(auth_context, query)

    # Query Elasticsearch.
    try:
        result = es().search(index=index, doc_type=event_type, body=query)
    except eexc.NotFoundError as err:
        log.error('Error %s during ES query: %s', err.status_code, err.info)
        raise NotFoundError(err.error)
    except (eexc.RequestError, eexc.TransportError) as err:
        log.error('Error %s during ES query: %s', err.status_code, err.info)
        raise BadRequestError(err.error)
    except (eexc.ConnectionError, eexc.ConnectionTimeout) as err:
        log.error('Error %s during ES query: %s', err.status_code, err.info)
        raise ServiceUnavailableError(err.error)

    for hit in result['hits']['hits']:
        event = hit['_source']
        if not event.get('action'):
            log.error('Skipped event %s, missing action', event['log_id'])
            continue
        try:
            extra = json.loads(event.pop('extra'))
        except Exception as exc:
            log.error('Failed to parse extra of event %s: %r', event, exc)
        else:
            for key, value in extra.items():
                event[key] = value
        if event.get('su') and not is_admin:
            continue
        yield event
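
For reference, this is roughly the base query body built above before any optional term filters are appended, for a call with start=0, stop=0, limit=0 and newest=True.

# Shape of the base Elasticsearch query (defaults: window of 50 results, newest first).
base_query = {
    "query": {
        "bool": {
            "filter": {
                "bool": {
                    "must": [
                        {"range": {"@timestamp": {"gte": 0, "lte": "now"}}}
                    ],
                    "must_not": [],
                }
            }
        }
    },
    "sort": [{"@timestamp": {"order": "desc"}}],
    "size": 50,
}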
Example #5
def toggle_cloud(request):
    """
    Tags: clouds
    ---
    Toggles cloud with given cloud id.
    EDIT permission required on cloud.
    ---
    cloud_id:
      in: path
      required: true
      type: string
    new_state:
      enum:
      - '0'
      - '1'
      required: true
      type: string
    """
    auth_context = auth_context_from_request(request)
    cloud_id = request.matchdict['cloud']
    try:
        cloud = Cloud.objects.get(owner=auth_context.owner,
                                  id=cloud_id,
                                  deleted=None)
    except Cloud.DoesNotExist:
        raise NotFoundError('Cloud does not exist')

    auth_context.check_perm('cloud', 'edit', cloud_id)

    new_state = params_from_request(request).get('new_state', None)
    dns_enabled = params_from_request(request).get('dns_enabled', None)
    observation_logs_enabled = params_from_request(request).get(
        'observation_logs_enabled', None)

    if new_state is None and dns_enabled is None and \
            observation_logs_enabled is None:
        raise RequiredParameterMissingError(
            'new_state or dns_enabled or observation_logs_enabled')

    if new_state == '1':
        cloud.ctl.enable()
    elif new_state == '0':
        cloud.ctl.disable()
    elif new_state:
        raise BadRequestError('Invalid cloud state')

    if dns_enabled == 1:
        cloud.ctl.dns_enable()
    elif dns_enabled == 0:
        cloud.ctl.dns_disable()
    elif dns_enabled:
        raise BadRequestError('Invalid DNS state')

    if observation_logs_enabled == 1:
        cloud.ctl.observation_logs_enable()
    elif observation_logs_enabled == 0:
        cloud.ctl.observation_logs_disable()
    elif observation_logs_enabled:
        raise BadRequestError('Invalid observation_logs_enabled state')

    trigger_session_update(auth_context.owner, ['clouds'])
    return OK
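
Illustrative request bodies for this view; note that new_state is handled as a string, while dns_enabled and observation_logs_enabled are compared against integers.

# Illustrative payloads for toggle_cloud.
enable_cloud = {'new_state': '1'}                   # '1' enables, '0' disables
enable_dns = {'dns_enabled': 1}                     # 1 enables, 0 disables
disable_observation_logs = {'observation_logs_enabled': 0}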
Example #6
def run_script(request):
    """
    Start a job to run the script on the specified machine.
    READ permission required on cloud.
    RUN_SCRIPT permission required on machine.
    RUN permission required on script.
    ---
    script_id:
      in: path
      required: true
      type: string
    machine_uuid:
      required: true
      type: string
    params:
      type: string
    su:
      type: boolean
    env:
      type: string
    job_id:
      type: string
    """
    script_id = request.matchdict['script_id']
    params = params_from_request(request)
    script_params = params.get('params', '')
    su = params.get('su', False)
    env = params.get('env')
    job_id = params.get('job_id')
    if not job_id:
        job = 'run_script'
        job_id = uuid.uuid4().hex
    else:
        job = None
    if isinstance(env, dict):
        env = json.dumps(env)

    auth_context = auth_context_from_request(request)
    if 'machine_uuid' in params:
        machine_uuid = params.get('machine_uuid')
        if not machine_uuid:
            raise RequiredParameterMissingError('machine_uuid')

        try:
            machine = Machine.objects.get(id=machine_uuid,
                                          state__ne='terminated')
            # used by logging_view_decorator
            request.environ['machine_id'] = machine.machine_id
            request.environ['cloud_id'] = machine.cloud.id
        except me.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_uuid)
        cloud_id = machine.cloud.id
    else:
        # this will be deprecated, keep it for backwards compatibility
        cloud_id = params.get('cloud_id')
        machine_id = params.get('machine_id')

        for key in ('cloud_id', 'machine_id'):
            if key not in params:
                raise RequiredParameterMissingError(key)
        try:
            machine = Machine.objects.get(cloud=cloud_id,
                                          machine_id=machine_id,
                                          state__ne='terminated')
            # used by logging_view_decorator
            request.environ['machine_uuid'] = machine.id
        except me.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_id)

    # SEC require permission READ on cloud
    auth_context.check_perm("cloud", "read", cloud_id)
    # SEC require permission RUN_SCRIPT on machine
    auth_context.check_perm("machine", "run_script", machine.id)
    # SEC require permission RUN on script
    auth_context.check_perm('script', 'run', script_id)
    try:
        script = Script.objects.get(owner=auth_context.owner,
                                    id=script_id,
                                    deleted=None)
    except me.DoesNotExist:
        raise NotFoundError('Script id not found')
    job_id = job_id or uuid.uuid4().hex
    tasks.run_script.delay(auth_context.owner.id,
                           script.id,
                           machine.id,
                           params=script_params,
                           env=env,
                           su=su,
                           job_id=job_id,
                           job=job)
    return {'job_id': job_id, 'job': job}
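
A hedged sketch of the request body using the newer machine_uuid form; the cloud_id/machine_id pair is kept only for backwards compatibility.

# Illustrative payload for run_script (machine_uuid form); the uuid is a placeholder.
params = {
    'machine_uuid': '<machine uuid>',  # required
    'params': '--verbose',             # free-form script parameters
    'env': {'MY_VAR': 'value'},        # dicts are JSON-encoded by the view
    'su': False,
}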
Example #7
def associate_key(request):
    """
    Associate a key with a machine
    Associates a key with a machine. If host is set, it will also attempt to
    actually deploy the key to the machine. To do that, it requires another
    key (existing_key) that can connect to the machine.
    READ permission required on cloud.
    READ_PRIVATE permission required on key.
    ASSOCIATE_KEY permission required on machine.
    ---
    machine:
      in: path
      required: true
      type: string
    key:
      in: path
      required: true
      type: string
    port:
      default: 22
      type: integer
    user:
      description: The ssh user
      type: string
    """
    key_id = request.matchdict['key']
    cloud_id = request.matchdict.get('cloud')

    params = params_from_request(request)
    ssh_user = params.get('user', None)
    try:
        ssh_port = int(request.json_body.get('port', 22))
    except Exception:
        ssh_port = 22

    auth_context = auth_context_from_request(request)
    try:
        key = Key.objects.get(owner=auth_context.owner,
                              id=key_id,
                              deleted=None)
    except Key.DoesNotExist:
        raise NotFoundError('Key id does not exist')
    auth_context.check_perm('key', 'read_private', key.id)

    if cloud_id:
        # this is deprecated, keep it for backwards compatibility
        machine_id = request.matchdict['machine']
        try:
            Cloud.objects.get(owner=auth_context.owner,
                              id=cloud_id,
                              deleted=None)
        except Cloud.DoesNotExist:
            raise NotFoundError('Cloud does not exist')

        auth_context.check_perm("cloud", "read", cloud_id)
        try:
            machine = Machine.objects.get(cloud=cloud_id,
                                          machine_id=machine_id,
                                          state__ne='terminated')
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_id)
    else:
        machine_uuid = request.matchdict['machine']
        try:
            machine = Machine.objects.get(id=machine_uuid,
                                          state__ne='terminated')
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_uuid)

        cloud_id = machine.cloud.id
        auth_context.check_perm("cloud", "read", cloud_id)

    auth_context.check_perm("machine", "associate_key", machine.id)

    key.ctl.associate(machine, username=ssh_user, port=ssh_port)
    clouds = Cloud.objects(owner=auth_context.owner, deleted=None)
    machines = Machine.objects(cloud__in=clouds,
                               key_associations__keypair__exact=key)

    assoc_machines = transform_key_machine_associations(machines, key)
    return assoc_machines
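
An illustrative body for the non-deprecated path, where the machine is addressed by its Mist uuid in the URL; both fields are optional.

# Illustrative payload for associate_key.
params = {
    'port': 22,        # falls back to 22 if missing or not an integer
    'user': 'ubuntu',  # ssh user to associate with the key
}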
Example #8
def machine_console(request):
    """
    Tags: machines
    ---
    Open VNC console.
    Generates and returns a URI to open a VNC console to the target machine.
    READ permission required on cloud.
    READ permission required on machine.
    ---
    cloud:
      in: path
      required: true
      type: string
    machine:
      in: path
      required: true
      type: string
    rdp_port:
      default: 3389
      in: query
      required: true
      type: integer
    host:
      in: query
      required: true
      type: string
    """
    cloud_id = request.matchdict.get('cloud')

    auth_context = auth_context_from_request(request)

    if cloud_id:
        machine_id = request.matchdict['machine']
        auth_context.check_perm("cloud", "read", cloud_id)
        try:
            machine = Machine.objects.get(cloud=cloud_id,
                                          machine_id=machine_id,
                                          state__ne='terminated')
            # used by logging_view_decorator
            request.environ['machine_uuid'] = machine.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_id)
    else:
        machine_uuid = request.matchdict['machine_uuid']
        try:
            machine = Machine.objects.get(id=machine_uuid,
                                          state__ne='terminated')
            # used by logging_view_decorator
            request.environ['machine_id'] = machine.machine_id
            request.environ['cloud_id'] = machine.cloud.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_uuid)

        cloud_id = machine.cloud.id
        auth_context.check_perm("cloud", "read", cloud_id)

    auth_context.check_perm("machine", "read", machine.id)

    if machine.cloud.ctl.provider != 'vsphere':
        raise NotImplementedError("VNC console only supported for vSphere")

    console_uri = machine.cloud.ctl.compute.connection.ex_open_console(
        machine.machine_id)

    raise RedirectError(console_uri)
Example #9
def triggered(request):
    """
    Tags: rules
    ---
    Process a trigger sent by the alert service.

    Based on the parameters of the request, this method will initiate actions
    to mitigate the conditions that triggered the rule and notify the users.

    ---

    value:
     type: integer
     required: true
     description: >
       the value that triggered the rule by exceeding the threshold
    incident:
     type: string
     required: true
     description: the incident's UUID
    resource:
     type: string
     required: true
     description: the UUID of the resource for which the rule got triggered
    triggered:
     type: integer
     required: true
     description: 0 if the specified incident got resolved/untriggered
    triggered_now:
     type: integer
     required: true
     description: |
       0 in case this is not the first time the specified incident has
       raised an alert
    firing_since:
     type: string
     required: true
     description: |
       the time at which the rule raised an alert and sent a trigger to
       this API endpoint
    pending_since:
     type: string
     required: true
     description: |
       the time at which the rule evaluated to True and entered pending
       state. A rule can remain in pending state if a TriggerOffset has
       been configured. Datetime needed
    resolved_since:
     type: string
     required: true
     description: >
       the time at which the incident with the specified UUID got resolved.
       Datetime needed

    """
    # TODO: Should this API endpoint not be publicly exposed?
    if config.CILIA_SECRET_KEY != request.headers.get('Cilia-Secret-Key'):
        raise UnauthorizedError()

    params = params_from_request(request)

    keys = (
        'value',
        'incident',
        'triggered',
        'triggered_now',
        'firing_since',
        'pending_since',
        'resolved_since',
    )
    for key in keys:
        if key not in params:
            raise RequiredParameterMissingError(key)

    # Get the rule's UUID.
    # TODO rule_id = request.matchdict['rule']
    rule_id = params['rule_id']

    # Get resource and incidents ids.
    incident_id = str(params['incident'])
    resource_id = str(params['resource'])

    # Get timestamps.
    firing_since = str(params['firing_since'])
    # pending_since = str(params['pending_since'])
    resolved_since = str(params['resolved_since'])

    try:
        value = params['value']
        value = float(value)
    except (TypeError, ValueError) as err:
        log.error('Failed to cast "%s" to float: %r', value, err)
        raise BadRequestError('Failed to convert %s to float' % value)

    def int_to_bool(param):
        try:
            return bool(int(param or 0))
        except (ValueError, TypeError) as err:
            log.error('Failed to cast int to bool: %r', err)
            raise BadRequestError('Failed to convert %s to boolean' % param)

    # Get flags indicating whether the incident has been (just) triggered.
    triggered = int_to_bool(params['triggered'])
    triggered_now = int_to_bool(params['triggered_now'])

    # Get the timestamp at which the rule's state changed.
    try:
        timestamp = resolved_since or firing_since
        timestamp = int(get_datetime(timestamp).strftime('%s'))
    except ValueError as err:
        log.error('Failed to cast datetime obj to unix timestamp: %r', err)
        raise BadRequestError(err)

    try:
        rule = Rule.objects.get(id=rule_id)
    except Rule.DoesNotExist:
        raise RuleNotFoundError()

    # Validate resource, if the rule is resource-bound.
    if not rule.is_arbitrary():
        resource_type = rule.resource_model_name
        Model = get_resource_model(resource_type)
        try:
            resource = Model.objects.get(id=resource_id, owner=rule.owner_id)
        except Model.DoesNotExist:
            raise NotFoundError('%s %s' % (resource_type, resource_id))
        if is_resource_missing(resource):
            raise NotFoundError('%s %s' % (resource_type, resource_id))
    else:
        resource_type = resource_id = None

    # Record the trigger, if it's a no-data, to refer to it later.
    if isinstance(rule, NoDataRule):
        if triggered:
            NoDataRuleTracker.add(rule.id, resource.id)
        else:
            NoDataRuleTracker.remove(rule.id, resource.id)
    # Run chain of rule's actions.
    run_chained_actions(
        rule.id,
        incident_id,
        resource_id,
        resource_type,
        value,
        triggered,
        triggered_now,
        timestamp,
    )
    return Response('OK', 200)
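
A hedged example of the parameters the alert service is expected to post, covering the required keys checked above; UUIDs and timestamps are placeholders.

# Illustrative trigger payload; all keys below are read by the view.
params = {
    'rule_id': '<rule uuid>',
    'value': 97.5,                          # the value that crossed the threshold
    'incident': '<incident uuid>',
    'resource': '<resource uuid>',
    'triggered': 1,                         # 0 means resolved/untriggered
    'triggered_now': 1,                     # 0 if the incident was already firing
    'firing_since': '2024-01-01 12:00:00',
    'pending_since': '',
    'resolved_since': '',
}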
Example #10
def add_machine(request):
    """
    Tags: machines
    ---
    Add a machine to an OtherServer Cloud. This works for bare_metal clouds.
    ---
    cloud:
      in: path
      required: true
      type: string
    machine_ip:
      type: string
      required: true
    operating_system:
      type: string
    machine_name:
      type: string
    machine_key:
      type: string
    machine_user:
      type: string
    machine_port:
      type: string
    remote_desktop_port:
      type: string
    monitoring:
      type: boolean
    """
    cloud_id = request.matchdict.get('cloud')
    params = params_from_request(request)
    machine_ip = params.get('machine_ip')
    if not machine_ip:
        raise RequiredParameterMissingError("machine_ip")

    operating_system = params.get('operating_system', '')
    machine_name = params.get('machine_name', '')
    machine_key = params.get('machine_key', '')
    machine_user = params.get('machine_user', '')
    machine_port = params.get('machine_port', '')
    remote_desktop_port = params.get('remote_desktop_port', '')
    monitoring = params.get('monitoring', '')

    job_id = params.get('job_id')
    if not job_id:
        job = 'add_machine'
        job_id = uuid.uuid4().hex
    else:
        job = None

    auth_context = auth_context_from_request(request)
    auth_context.check_perm("cloud", "read", cloud_id)

    if machine_key:
        auth_context.check_perm("key", "read", machine_key)

    try:
        Cloud.objects.get(owner=auth_context.owner, id=cloud_id, deleted=None)
    except Cloud.DoesNotExist:
        raise NotFoundError('Cloud does not exist')

    log.info('Adding bare metal machine %s on cloud %s' %
             (machine_name, cloud_id))
    cloud = Cloud.objects.get(owner=auth_context.owner,
                              id=cloud_id,
                              deleted=None)

    try:
        machine = cloud.ctl.add_machine(machine_name,
                                        host=machine_ip,
                                        ssh_user=machine_user,
                                        ssh_port=machine_port,
                                        ssh_key=machine_key,
                                        os_type=operating_system,
                                        rdp_port=remote_desktop_port,
                                        fail_on_error=True)
    except Exception as e:
        raise MachineCreationError("OtherServer, got exception %r" % e, exc=e)

    # Enable monitoring
    if monitoring:
        monitor = enable_monitoring(auth_context.owner,
                                    cloud.id,
                                    machine.machine_id,
                                    no_ssh=not (machine.os_type == 'unix'
                                                and machine.key_associations))

    ret = {
        'id': machine.id,
        'name': machine.name,
        'extra': {},
        'public_ips': machine.public_ips,
        'private_ips': machine.private_ips,
        'job_id': job_id,
        'job': job
    }

    if monitoring:
        ret.update({'monitoring': monitor})

    return ret
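
An illustrative request body for this bare-metal variant; machine_ip is the only required field and the key id is a placeholder.

# Illustrative payload for the OtherServer add_machine variant.
params = {
    'machine_ip': '203.0.113.20',    # required
    'machine_name': 'rack-node-7',
    'machine_user': 'root',
    'machine_port': '22',
    'machine_key': '<mist key id>',  # placeholder
    'monitoring': False,
}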
Example #11
def machine_actions(request):
    """
    Tags: machines
    ---
    Calls a machine action on a cloud that supports it.
    READ permission required on cloud.
    ACTION permission required on machine (ACTION can be START,
    STOP, DESTROY, REBOOT, or RESIZE and RENAME for some providers).
    ---
    machine_uuid:
      in: path
      required: true
      type: string
    action:
      enum:
      - start
      - stop
      - reboot
      - destroy
      - resize
      - rename
      required: true
      type: string
    name:
      description: The new name of the renamed machine
      type: string
    size:
      description: The size id of the plan to resize
      type: string
    """
    cloud_id = request.matchdict.get('cloud')
    params = params_from_request(request)
    action = params.get('action', '')
    size_id = params.get('size', params.get('plan_id', ''))
    memory = params.get('memory', '')
    cpus = params.get('cpus', '')
    cpu_shares = params.get('cpu_shares', '')
    cpu_units = params.get('cpu_units', '')
    name = params.get('name', '')
    auth_context = auth_context_from_request(request)

    if cloud_id:
        # this is deprecated, keep it for backwards compatibility
        machine_id = request.matchdict['machine']
        auth_context.check_perm("cloud", "read", cloud_id)
        try:
            machine = Machine.objects.get(cloud=cloud_id,
                                          machine_id=machine_id,
                                          state__ne='terminated')
            # used by logging_view_decorator
            request.environ['machine_uuid'] = machine.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_id)
    else:
        machine_uuid = request.matchdict['machine_uuid']
        try:
            machine = Machine.objects.get(id=machine_uuid)
            # VMs in libvirt can be started no matter if they are terminated
            if machine.state == 'terminated' and not isinstance(
                    machine.cloud, LibvirtCloud):
                raise NotFoundError("Machine %s has been terminated" %
                                    machine_uuid)
            # used by logging_view_decorator
            request.environ['machine_id'] = machine.machine_id
            request.environ['cloud_id'] = machine.cloud.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_uuid)

        cloud_id = machine.cloud.id
        auth_context.check_perm("cloud", "read", cloud_id)

    if machine.cloud.owner != auth_context.owner:
        raise NotFoundError("Machine %s doesn't exist" % machine.id)

    auth_context.check_perm("machine", action, machine.id)

    actions = ('start', 'stop', 'reboot', 'destroy', 'resize', 'rename',
               'undefine', 'suspend', 'resume', 'remove')

    if action not in actions:
        raise BadRequestError("Action '%s' should be "
                              "one of %s" % (action, actions))
    if action == 'destroy':
        methods.destroy_machine(auth_context.owner, cloud_id,
                                machine.machine_id)
    elif action == 'remove':
        log.info('Removing machine %s in cloud %s' %
                 (machine.machine_id, cloud_id))

        if not machine.monitoring.hasmonitoring:
            machine.ctl.remove()
            # Schedule a UI update
            trigger_session_update(auth_context.owner, ['clouds'])
            return

        # if machine has monitoring, disable it. the way we disable depends on
        # whether this is a standalone io installation or not
        try:
            disable_monitoring(auth_context.owner,
                               cloud_id,
                               machine.machine_id,
                               no_ssh=True)
        except Exception as exc:
            log.warning(
                "Didn't manage to disable monitoring, maybe the "
                "machine never had monitoring enabled. Error: %r", exc)

        machine.ctl.remove()

        # Schedule a UI update
        trigger_session_update(auth_context.owner, ['clouds'])

    elif action in ('start', 'stop', 'reboot', 'undefine', 'suspend',
                    'resume'):
        getattr(machine.ctl, action)()
    elif action == 'rename':
        if not name:
            raise BadRequestError("You must give a name!")
        getattr(machine.ctl, action)(name)
    elif action == 'resize':
        kwargs = {}
        if memory:
            kwargs['memory'] = memory
        if cpus:
            kwargs['cpus'] = cpus
        if cpu_shares:
            kwargs['cpu_shares'] = cpu_shares
        if cpu_units:
            kwargs['cpu_units'] = cpu_units
        getattr(machine.ctl, action)(size_id, kwargs)

    # TODO: We shouldn't return list_machines, just OK. Save the API!
    return methods.filter_list_machines(auth_context, cloud_id)
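
Illustrative bodies for two of the supported actions; the resize branch only forwards memory/cpu keys that are present in the request.

# Illustrative payloads for machine_actions.
rename_params = {'action': 'rename', 'name': 'new-machine-name'}
resize_params = {
    'action': 'resize',
    'size': '<size id>',   # falls back to 'plan_id' when missing
    'memory': 2048,
    'cpus': 2,
}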
Example #12
def create_machine(request):
    """
    Create machine(s) on cloud
    Creates one or more machines on the specified cloud. If async is true, a
    jobId will be returned.
    READ permission required on cloud.
    CREATE_RESOURCES permission required on cloud.
    CREATE permission required on machine.
    RUN permission required on script.
    READ permission required on key.

    ---
    cloud:
      in: path
      required: true
      type: string
    async:
      description: ' Create machines asynchronously, returning a jobId'
      type: boolean
    quantity:
      description: ' The number of machines that will be created, async only'
      type: integer
    azure_port_bindings:
      type: string
    cloud_id:
      description: The Cloud ID
      required: true
      type: string
    disk:
      description: ' Only required by Linode cloud'
      type: string
    docker_command:
      type: string
    docker_env:
      items:
        type: string
      type: array
    docker_exposed_ports:
      type: object
    docker_port_bindings:
      type: object
    hostname:
      type: string
    image_extra:
      description: ' Needed only by Linode cloud'
      type: string
    image:
      description: ' Id of image to be used with the creation'
      required: true
      type: string
    image_name:
      type: string
    ips:
      type: string
    job_id:
      type: string
    key_id:
      description: ' Associate machine with this key_id'
      required: true
      type: string
    location_id:
      description: ' Id of the cloud''s location to create the machine'
      required: true
      type: string
    location_name:
      type: string
    machine_name:
      required: true
      type: string
    monitoring:
      type: string
    networks:
      items:
        type: string
      type: array
    plugins:
      items:
        type: string
      type: array
    post_script_id:
      type: string
    post_script_params:
      type: string
    script:
      type: string
    script_id:
      type: string
    script_params:
      type: string
    size_id:
      description: ' Id of the size of the machine'
      required: true
      type: string
    size_name:
      type: string
    ssh_port:
      type: integer
    softlayer_backend_vlan_id:
      description: Specify the id of a backend (private) VLAN
      type: integer
    project_id:
      description: ' Needed only by Packet.net cloud'
      type: string
    billing:
      description: ' Needed only by SoftLayer cloud'
      type: string
    bare_metal:
      description: ' Needed only by SoftLayer cloud'
      type: string
    schedule:
      type: object
    """

    params = params_from_request(request)
    cloud_id = request.matchdict['cloud']

    for key in ('name', 'size'):
        if key not in params:
            raise RequiredParameterMissingError(key)

    key_id = params.get('key')
    machine_name = params['name']
    location_id = params.get('location', None)
    image_id = params.get('image')
    if not image_id:
        raise RequiredParameterMissingError("image")
    # this is used in libvirt
    disk_size = int(params.get('libvirt_disk_size', 4))
    disk_path = params.get('libvirt_disk_path', '')
    size_id = params['size']
    # deploy_script received as unicode, but ScriptDeployment wants str
    script = str(params.get('script', ''))
    # these are required only for Linode/GCE, passing them anyway
    image_extra = params.get('image_extra', None)
    disk = params.get('disk', None)
    image_name = params.get('image_name', None)
    size_name = params.get('size_name', None)
    location_name = params.get('location_name', None)
    ips = params.get('ips', None)
    monitoring = params.get('monitoring', False)
    networks = params.get('networks', [])
    docker_env = params.get('docker_env', [])
    docker_command = params.get('docker_command', None)
    script_id = params.get('script_id', '')
    script_params = params.get('script_params', '')
    post_script_id = params.get('post_script_id', '')
    post_script_params = params.get('post_script_params', '')
    run_async = params.get('async', False)
    quantity = params.get('quantity', 1)
    persist = params.get('persist', False)
    docker_port_bindings = params.get('docker_port_bindings', {})
    docker_exposed_ports = params.get('docker_exposed_ports', {})
    azure_port_bindings = params.get('azure_port_bindings', '')
    # hostname: if provided it will be attempted to assign a DNS name
    hostname = params.get('hostname', '')
    plugins = params.get('plugins')
    cloud_init = params.get('cloud_init', '')
    associate_floating_ip = params.get('associate_floating_ip', False)
    associate_floating_ip_subnet = params.get('attach_floating_ip_subnet',
                                              None)
    project_id = params.get('project', None)
    bare_metal = params.get('bare_metal', False)
    # bare_metal True creates a hardware server in SoftLayer,
    # while bare_metal False creates a virtual cloud server.
    # hourly True is the default setting for SoftLayer hardware
    # servers, while False means the server has monthly pricing.
    softlayer_backend_vlan_id = params.get('softlayer_backend_vlan_id', None)
    hourly = params.get('billing', True)
    job_id = params.get('job_id')
    # The `job` variable points to the event that started the job. If a job_id
    # is not provided, then it means that this is the beginning of a new story
    # that starts with a `create_machine` event. If a job_id is provided that
    # means that the current event will be part of already existing, unknown
    # story. TODO: Provide the `job` in the request's params or query it.
    if not job_id:
        job = 'create_machine'
        job_id = uuid.uuid4().hex
    else:
        job = None

    # these are needed for OnApp
    size_ram = params.get('size_ram', 256)
    size_cpu = params.get('size_cpu', 1)
    size_disk_primary = params.get('size_disk_primary', 5)
    size_disk_swap = params.get('size_disk_swap', 1)
    boot = params.get('boot', True)
    build = params.get('build', True)
    cpu_priority = params.get('cpu_priority', 1)
    cpu_sockets = params.get('cpu_sockets', 1)
    cpu_threads = params.get('cpu_threads', 1)
    port_speed = params.get('port_speed', 0)
    hypervisor_group_id = params.get('hypervisor_group_id')

    auth_context = auth_context_from_request(request)

    try:
        Cloud.objects.get(owner=auth_context.owner, id=cloud_id, deleted=None)
    except Cloud.DoesNotExist:
        raise NotFoundError('Cloud does not exist')

    # compose schedule as a dict from relative parameters
    if not params.get('schedule_type'):
        schedule = {}
    else:
        if params.get('schedule_type') not in [
                'crontab', 'interval', 'one_off'
        ]:
            raise BadRequestError('schedule type must be one of '
                                  'these: (crontab, interval, one_off)')
        if params.get('schedule_entry') == {}:
            raise RequiredParameterMissingError('schedule_entry')

        schedule = {
            'name': params.get('name'),
            'description': params.get('description', ''),
            'action': params.get('action', ''),
            'script_id': params.get('schedule_script_id', ''),
            'schedule_type': params.get('schedule_type'),
            'schedule_entry': params.get('schedule_entry'),
            'expires': params.get('expires', ''),
            'start_after': params.get('start_after', ''),
            'max_run_count': params.get('max_run_count'),
            'task_enabled': bool(params.get('task_enabled', True)),
            'auth_context': auth_context.serialize(),
        }

    auth_context.check_perm("cloud", "read", cloud_id)
    auth_context.check_perm("cloud", "create_resources", cloud_id)
    tags = auth_context.check_perm("machine", "create", None) or {}
    if script_id:
        auth_context.check_perm("script", "run", script_id)
    if key_id:
        auth_context.check_perm("key", "read", key_id)

    # Parse tags.
    try:
        mtags = params.get('tags') or {}
        if not isinstance(mtags, dict):
            if not isinstance(mtags, list):
                raise ValueError()
            if not all((isinstance(t, dict) and len(t) == 1 for t in mtags)):
                raise ValueError()
            mtags = {key: val for item in mtags for key, val in item.items()}
        tags.update(mtags)
    except ValueError:
        raise BadRequestError('Invalid tags format. Expecting either a '
                              'dictionary of tags or a list of single-item '
                              'dictionaries')

    args = (cloud_id, key_id, machine_name, location_id, image_id, size_id,
            image_extra, disk, image_name, size_name, location_name, ips,
            monitoring, networks, docker_env, docker_command)
    kwargs = {
        'script_id': script_id,
        'script_params': script_params,
        'script': script,
        'job': job,
        'job_id': job_id,
        'docker_port_bindings': docker_port_bindings,
        'docker_exposed_ports': docker_exposed_ports,
        'azure_port_bindings': azure_port_bindings,
        'hostname': hostname,
        'plugins': plugins,
        'post_script_id': post_script_id,
        'post_script_params': post_script_params,
        'disk_size': disk_size,
        'disk_path': disk_path,
        'cloud_init': cloud_init,
        'associate_floating_ip': associate_floating_ip,
        'associate_floating_ip_subnet': associate_floating_ip_subnet,
        'project_id': project_id,
        'bare_metal': bare_metal,
        'tags': tags,
        'hourly': hourly,
        'schedule': schedule,
        'softlayer_backend_vlan_id': softlayer_backend_vlan_id,
        'size_ram': size_ram,
        'size_cpu': size_cpu,
        'size_disk_primary': size_disk_primary,
        'size_disk_swap': size_disk_swap,
        'boot': boot,
        'build': build,
        'cpu_priority': cpu_priority,
        'cpu_sockets': cpu_sockets,
        'cpu_threads': cpu_threads,
        'port_speed': port_speed,
        'hypervisor_group_id': hypervisor_group_id
    }
    if not async:
        ret = methods.create_machine(auth_context.owner, *args, **kwargs)
    else:
        args = (auth_context.owner.id, ) + args
        kwargs.update({'quantity': quantity, 'persist': persist})
        tasks.create_machine_async.apply_async(args, kwargs, countdown=2)
        ret = {'job_id': job_id}
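    # The async path responds with the job id only; the 'job' value is
    # attached to both the sync and async responses below.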
    ret.update({'job': job})
    return ret
Example #13
0
def machine_actions(request):
    """
    Call an action on a machine.
    Calls a machine action on clouds that support it.
    READ permission required on cloud.
    ACTION permission required on machine (ACTION can be START,
    STOP, DESTROY, REBOOT).
    ---
    machine:
      in: path
      required: true
      type: string
    action:
      enum:
      - start
      - stop
      - reboot
      - destroy
      - resize
      - rename
      required: true
      type: string
    name:
      description: The new name of the renamed machine
      type: string
    size:
      description: The size id of the plan to resize
      type: string
    """
    cloud_id = request.matchdict.get('cloud')
    params = params_from_request(request)
    action = params.get('action', '')
    plan_id = params.get('plan_id', '')
    name = params.get('name', '')
    auth_context = auth_context_from_request(request)

    if cloud_id:
        # this is deprecated, kept for backwards compatibility
        machine_id = request.matchdict['machine']
        auth_context.check_perm("cloud", "read", cloud_id)
        try:
            machine = Machine.objects.get(cloud=cloud_id,
                                          machine_id=machine_id,
                                          state__ne='terminated')
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_id)
    else:
        machine_uuid = request.matchdict['machine']
        try:
            machine = Machine.objects.get(id=machine_uuid,
                                          state__ne='terminated')
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_uuid)

        cloud_id = machine.cloud.id
        auth_context.check_perm("cloud", "read", cloud_id)

    if machine.cloud.owner != auth_context.owner:
        raise NotFoundError("Machine %s doesn't exist" % machine.id)

    auth_context.check_perm("machine", action, machine.id)

    actions = ('start', 'stop', 'reboot', 'destroy', 'resize', 'rename',
               'undefine', 'suspend', 'resume')

    if action not in actions:
        raise BadRequestError("Action '%s' should be "
                              "one of %s" % (action, actions))
    if action == 'destroy':
        methods.destroy_machine(auth_context.owner, cloud_id,
                                machine.machine_id)
    elif action in ('start', 'stop', 'reboot', 'undefine', 'suspend',
                    'resume'):
        getattr(machine.ctl, action)()
    elif action == 'rename':
        if not name:
            raise BadRequestError("You must give a name!")
        getattr(machine.ctl, action)(name)
    elif action == 'resize':
        getattr(machine.ctl, action)(plan_id)

    # TODO: We shouldn't return list_machines, just OK. Save the API!
    return methods.filter_list_machines(auth_context, cloud_id)
Example #14
0
    def _update__preparse_machines(self, auth_context, kwargs):
        """Preparse machines arguments to `self.update`

        This is called by `self.update` when adding a new schedule, in
        order to apply pre-processing to the given params. Any subclass
        that requires special pre-processing of the params passed to
        `self.update` SHOULD override this method.

        Params:
        kwargs: A dict of the keyword arguments that will be set as
            attributes of the `Schedule` model instance stored in
            `self.schedule`. This method is expected to modify `kwargs`
            in place and set the fields specific to each selector type.

        """
        sel_cls = {
            'tags': TaggingSelector,
            'machines': GenericResourceSelector,
            'field': FieldSelector,
            'age': MachinesAgeSelector
        }

        if kwargs.get('selectors'):
            self.schedule.selectors = []
        for selector in kwargs.get('selectors', []):
            if selector.get('type') not in sel_cls:
                raise BadRequestError()
            if selector['type'] == 'field':
                if selector['field'] not in ('created', 'state',
                                             'cost__monthly'):
                    raise BadRequestError()
            sel = sel_cls[selector.get('type')]()
            sel.update(**selector)
            self.schedule.selectors.append(sel)
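        # Illustrative `selectors` payload (hypothetical ids; the 'value' key
        # of the field selector is assumed):
        #   [{'type': 'machines', 'ids': ['<machine-uuid>', '<machine-uuid>']},
        #    {'type': 'field', 'field': 'state', 'value': 'running'}]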

        action = kwargs.get('action')

        # check permissions
        check = False
        for selector in self.schedule.selectors:
            if selector.ctype == 'machines':
                for mid in selector.ids:
                    try:
                        machine = Machine.objects.get(id=mid,
                                                      state__ne='terminated')
                    except Machine.DoesNotExist:
                        raise NotFoundError("Machine %s doesn't exist or is "
                                            "terminated" % mid)

                    # SEC require permission READ on cloud
                    auth_context.check_perm("cloud", "read", machine.cloud.id)

                    if action and action not in ['notify']:
                        # SEC require permission ACTION on machine
                        auth_context.check_perm("machine", action, mid)
                    else:
                        # SEC require permission RUN_SCRIPT on machine
                        auth_context.check_perm("machine", "run_script", mid)
                check = True
            elif selector.ctype == 'tags':
                if action and action not in ['notify']:
                    # SEC require permission ACTION on machine
                    auth_context.check_perm("machine", action, None)
                else:
                    # SEC require permission RUN_SCRIPT on machine
                    auth_context.check_perm("machine", "run_script", None)
                check = True
        if not check:
            raise BadRequestError("Specify at least machine ids or tags")

        return
Example #15
0
def machine_actions(request):
    """
    Tags: machines
    ---
    Calls a machine action on clouds that support it.
    READ permission required on cloud.
    ACTION permission required on machine (ACTION can be START,
    STOP, DESTROY, REBOOT, or RESIZE and RENAME for some providers).
    ---
    machine_uuid:
      in: path
      required: true
      type: string
    action:
      enum:
      - start
      - stop
      - reboot
      - destroy
      - remove
      - resize
      - rename
      - create_snapshot
      - remove_snapshot
      - revert_to_snapshot
      required: true
      type: string
    name:
      description: The new name of the renamed machine
      type: string
    size:
      description: The size id of the plan to resize
      type: string
    snapshot_name:
      description: The name of the snapshot to create/remove/revert_to
      type: string
    snapshot_description:
      description: The description of the snapshot to create
      type: string
    snapshot_dump_memory:
      description: Dump the machine's memory in the snapshot
      type: boolean
      default: false
    snapshot_quiesce:
      description: Enable guest file system quiescing
      type: boolean
      default: false
    """
    cloud_id = request.matchdict.get('cloud')
    params = params_from_request(request)
    action = params.get('action', '')
    name = params.get('name', '')
    size_id = params.get('size', '')
    memory = params.get('memory', '')
    cpus = params.get('cpus', '')
    cpu_shares = params.get('cpu_shares', '')
    cpu_units = params.get('cpu_units', '')
    snapshot_name = params.get('snapshot_name')
    snapshot_description = params.get('snapshot_description')
    snapshot_dump_memory = params.get('snapshot_dump_memory')
    snapshot_quiesce = params.get('snapshot_quiesce')
    auth_context = auth_context_from_request(request)

    if cloud_id:
        machine_id = request.matchdict['machine']
        auth_context.check_perm("cloud", "read", cloud_id)
        try:
            machine = Machine.objects.get(cloud=cloud_id,
                                          machine_id=machine_id,
                                          state__ne='terminated')
            # used by logging_view_decorator
            request.environ['machine_uuid'] = machine.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_id)
    else:
        machine_uuid = request.matchdict['machine_uuid']
        try:
            machine = Machine.objects.get(id=machine_uuid)
            # VMs in libvirt can be started no matter if they are terminated
            if machine.state == 'terminated' and not isinstance(
                    machine.cloud, LibvirtCloud):
                raise NotFoundError("Machine %s has been terminated" %
                                    machine_uuid)
            # used by logging_view_decorator
            request.environ['machine_id'] = machine.machine_id
            request.environ['cloud_id'] = machine.cloud.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_uuid)

        cloud_id = machine.cloud.id
        auth_context.check_perm("cloud", "read", cloud_id)

    if machine.cloud.owner != auth_context.owner:
        raise NotFoundError("Machine %s doesn't exist" % machine.id)

    auth_context.check_perm("machine", action, machine.id)

    actions = ('start', 'stop', 'reboot', 'destroy', 'resize', 'rename',
               'undefine', 'suspend', 'resume', 'remove', 'list_snapshots',
               'create_snapshot', 'remove_snapshot', 'revert_to_snapshot',
               'clone')

    if action not in actions:
        raise BadRequestError("Action '%s' should be "
                              "one of %s" % (action, actions))

    if not methods.run_pre_action_hooks(machine, action, auth_context.user):
        return OK  # webhook requires stopping action propagation

    if action == 'destroy':
        result = methods.destroy_machine(auth_context.owner, cloud_id,
                                         machine.machine_id)
    elif action == 'remove':
        log.info('Removing machine %s in cloud %s' %
                 (machine.machine_id, cloud_id))

        # if machine has monitoring, disable it
        if machine.monitoring.hasmonitoring:
            try:
                disable_monitoring(auth_context.owner,
                                   cloud_id,
                                   machine.machine_id,
                                   no_ssh=True)
            except Exception as exc:
                log.warning("Didn't manage to disable monitoring, maybe the "
                            "machine never had monitoring enabled. Error: %r" %
                            exc)
        result = machine.ctl.remove()
        # Schedule a UI update
        trigger_session_update(auth_context.owner, ['clouds'])
    elif action in ('start', 'stop', 'reboot', 'clone', 'undefine', 'suspend',
                    'resume'):
        result = getattr(machine.ctl, action)()
    elif action == 'rename':
        if not name:
            raise BadRequestError("You must give a name!")
        result = getattr(machine.ctl, action)(name)
    elif action == 'resize':
        _, constraints = auth_context.check_perm("machine", "resize",
                                                 machine.id)
        # check cost constraint
        cost_constraint = constraints.get('cost', {})
        if cost_constraint:
            try:
                from mist.rbac.methods import check_cost
                check_cost(auth_context.org, cost_constraint)
            except ImportError:
                pass
        kwargs = {}
        if memory:
            kwargs['memory'] = memory
        if cpus:
            kwargs['cpus'] = cpus
        if cpu_shares:
            kwargs['cpu_shares'] = cpu_shares
        if cpu_units:
            kwargs['cpu_units'] = cpu_units
        result = getattr(machine.ctl, action)(size_id, kwargs)
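        # Illustrative resize request body (hypothetical values); only the
        # optional fields that were provided are forwarded to the controller:
        #   {'action': 'resize', 'size': '<size-id>', 'memory': 1024,
        #    'cpus': 2}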
    elif action == 'list_snapshots':
        return machine.ctl.list_snapshots()
    elif action in ('create_snapshot', 'remove_snapshot',
                    'revert_to_snapshot'):
        kwargs = {}
        if snapshot_description:
            kwargs['description'] = snapshot_description
        if snapshot_dump_memory:
            kwargs['dump_memory'] = bool(snapshot_dump_memory)
        if snapshot_quiesce:
            kwargs['quiesce'] = bool(snapshot_quiesce)
        result = getattr(machine.ctl, action)(snapshot_name, **kwargs)
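        # Illustrative snapshot request body (hypothetical values):
        #   {'action': 'create_snapshot', 'snapshot_name': 'pre-upgrade',
        #    'snapshot_description': 'before kernel upgrade',
        #    'snapshot_dump_memory': False, 'snapshot_quiesce': True}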

    methods.run_post_action_hooks(machine, action, auth_context.user, result)

    # TODO: We shouldn't return list_machines, just OK. Save the API!
    return methods.filter_list_machines(auth_context, cloud_id)
Example #16
0
    def add_machine(self, host, ssh_user='******', ssh_port=22, ssh_key=None,
                    **kwargs):
        try:
            ssh_port = int(ssh_port)
        except (ValueError, TypeError):
            ssh_port = 22

        if not ssh_key:
            raise RequiredParameterMissingError('machine_key')

        try:
            ssh_key = Key.objects.get(owner=self.cloud.owner, id=ssh_key,
                                      deleted=None)
        except Key.DoesNotExist:
            raise NotFoundError("Key does not exist.")

        images_location = kwargs.get('images_location',
                                     '/var/lib/libvirt/images')
        extra = {
            'images_location': images_location,
            'tags': {'type': 'hypervisor'},
            'username': ssh_user
        }

        from mist.api.machines.models import Machine
        # Create and save machine entry to database.
        # first check if the host has already been added to the cloud
        try:
            machine = Machine.objects.get(cloud=self.cloud,
                                          machine_id=host.replace('.', '-'))
            machine.name = kwargs.get('name') or host
            machine.ssh_port = ssh_port
            machine.extra = extra
            machine.last_seen = datetime.datetime.utcnow()
            machine.missing_since = None
        except me.DoesNotExist:
            machine = Machine(
                cloud=self.cloud,
                name=kwargs.get('name') or host,
                hostname=host,
                machine_id=host.replace('.', '-'),
                ssh_port=ssh_port,
                extra=extra,
                state=NodeState.RUNNING,
                last_seen=datetime.datetime.utcnow(),
            )

        # Sanitize inputs.
        host = sanitize_host(host)
        check_host(host)
        machine.hostname = host

        if is_private_subnet(socket.gethostbyname(host)):
            machine.private_ips = [host]
        else:
            machine.public_ips = [host]

        machine.save(write_concern={'w': 1, 'fsync': True})
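        # The write concern above forces an acknowledged, fsync'd write so
        # the new host is persisted before we try to associate the key.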

        # associate key and attempt to connect
        try:
            machine.ctl.associate_key(ssh_key,
                                      username=ssh_user,
                                      port=ssh_port)
        except MachineUnauthorizedError as exc:
            log.error("Could not connect to host %s."
                      % host)
            machine.delete()
            raise CloudUnauthorizedError(exc)
        except ServiceUnavailableError as exc:
            log.error("Could not connect to host %s."
                      % host)
            machine.delete()
            raise MistError("Couldn't connect to host '%s'."
                            % host)

        if amqp_owner_listening(self.cloud.owner.id):
            old_machines = []
            for cached_machine in \
                    self.cloud.ctl.compute.list_cached_machines():
                # make sure that host just added becomes visible
                if cached_machine.id != machine.id:
                    old_machines.append(cached_machine)
            old_machines = [m.as_dict() for m in
                            old_machines]
            new_machines = self.cloud.ctl.compute.list_machines()
            self.cloud.ctl.compute.produce_and_publish_patch(
                old_machines, new_machines)

        return machine
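        # Illustrative usage (hypothetical identifiers), assuming this method
        # is exposed on a libvirt cloud's controller:
        #   key = Key.objects.get(owner=owner, id='my-default-key')
        #   cloud.ctl.add_machine('kvm1.example.com', ssh_user='root',
        #                         ssh_port=22, ssh_key=key.id)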
Example #17
0
def machine_rdp(request):
    """
    Tags: machines
    ---
    RDP file for Windows machines.
    Generates and returns an RDP file for Windows machines.
    READ permission required on cloud.
    READ permission required on machine.
    ---
    cloud:
      in: path
      required: true
      type: string
    machine:
      in: path
      required: true
      type: string
    rdp_port:
      default: 3389
      in: query
      required: false
      type: integer
    host:
      in: query
      required: true
      type: string
    """
    cloud_id = request.matchdict.get('cloud')

    auth_context = auth_context_from_request(request)

    if cloud_id:
        machine_id = request.matchdict['machine']
        auth_context.check_perm("cloud", "read", cloud_id)
        try:
            machine = Machine.objects.get(cloud=cloud_id,
                                          machine_id=machine_id,
                                          state__ne='terminated')
            # used by logging_view_decorator
            request.environ['machine_uuid'] = machine.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_id)
    else:
        machine_uuid = request.matchdict['machine_uuid']
        try:
            machine = Machine.objects.get(id=machine_uuid,
                                          state__ne='terminated')
            # used by logging_view_decorator
            request.environ['machine_id'] = machine.machine_id
            request.environ['cloud_id'] = machine.cloud.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_uuid)

        cloud_id = machine.cloud.id
        auth_context.check_perm("cloud", "read", cloud_id)

    auth_context.check_perm("machine", "read", machine.id)
    rdp_port = request.params.get('rdp_port', 3389)
    host = request.params.get('host')

    if not host:
        raise BadRequestError('No hostname specified')
    try:
        rdp_port = int(rdp_port)
        if not 1 < rdp_port < 65535:
            rdp_port = 3389
    except (ValueError, TypeError):
        rdp_port = 3389

    host, rdp_port = dnat(auth_context.owner, host, rdp_port)

    rdp_content = 'full address:s:%s:%s\nprompt for credentials:i:1' % \
                  (host, rdp_port)
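    # Sample rendered content for host 203.0.113.10 on port 3389
    # (illustrative):
    #   full address:s:203.0.113.10:3389
    #   prompt for credentials:i:1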
    return Response(content_type='application/octet-stream',
                    content_disposition='attachment; filename="%s.rdp"' % host,
                    charset='utf8',
                    pragma='no-cache',
                    body=rdp_content)
Example #18
0
def delete_scripts(request):
    """
    Delete multiple scripts.
    Provide a list of script ids to be deleted. The method will try to delete
    all of them and then return a JSON object that describes, for each script
    id, whether it was deleted or 'not_found' if the script could not be
    located. If no script id was found, a 404 (Not Found) response will be
    returned.
    REMOVE permission required on each script.
    ---
    script_ids:
      required: true
      type: array
      items:
        type: string
        name: script_id
    """
    auth_context = auth_context_from_request(request)
    params = params_from_request(request)
    script_ids = params.get('script_ids', [])
    if not isinstance(script_ids, list) or not script_ids:
        raise RequiredParameterMissingError('No script ids provided')

    # remove duplicate ids if there are any
    script_ids = sorted(set(script_ids))

    report = {}
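    # Final shape (illustrative):
    #   {'<script_id>': 'deleted' | 'not_found' | 'unauthorized'}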
    for script_id in script_ids:
        try:
            script = Script.objects.get(owner=auth_context.owner,
                                        id=script_id,
                                        deleted=None)
        except me.DoesNotExist:
            report[script_id] = 'not_found'
            continue
        # SEC require REMOVE permission on script
        try:
            auth_context.check_perm('script', 'remove', script_id)
        except PolicyUnauthorizedError:
            report[script_id] = 'unauthorized'
        else:
            script.ctl.delete()
            report[script_id] = 'deleted'
        # /SEC

    # if no script id was valid raise exception
    if len([s for s in report
            if report[s] == 'not_found']) == len(script_ids):
        raise NotFoundError('No valid script id provided')
    # if user was not authorized for any script raise exception
    if len([s for s in report
            if report[s] == 'unauthorized']) == len(script_ids):
        raise UnauthorizedError("You don't have authorization for any of these"
                                " scripts")
    return report