Example #1
def machine_actions(request):
    """
    Tags: machines
    ---
    Calls a machine action on a cloud that supports it.
    READ permission required on cloud.
    ACTION permission required on machine (ACTION can be START,
    STOP, DESTROY, REBOOT, or RESIZE and RENAME for some providers).
    ---
    machine_uuid:
      in: path
      required: true
      type: string
    action:
      enum:
      - start
      - stop
      - reboot
      - destroy
      - resize
      - rename
      required: true
      type: string
    name:
      description: The new name of the renamed machine
      type: string
    size:
      description: The size id of the plan to resize
      type: string
    """
    cloud_id = request.matchdict.get('cloud')
    params = params_from_request(request)
    action = params.get('action', '')
    size_id = params.get('size', params.get('plan_id', ''))
    memory = params.get('memory', '')
    cpus = params.get('cpus', '')
    cpu_shares = params.get('cpu_shares', '')
    cpu_units = params.get('cpu_units', '')
    name = params.get('name', '')
    auth_context = auth_context_from_request(request)

    if cloud_id:
        # this is deprecated, kept for backwards compatibility
        machine_id = request.matchdict['machine']
        auth_context.check_perm("cloud", "read", cloud_id)
        try:
            machine = Machine.objects.get(cloud=cloud_id,
                                          machine_id=machine_id,
                                          state__ne='terminated')
            # used by logging_view_decorator
            request.environ['machine_uuid'] = machine.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_id)
    else:
        machine_uuid = request.matchdict['machine_uuid']
        try:
            machine = Machine.objects.get(id=machine_uuid)
            # VMs in libvirt can be started no matter if they are terminated
            if machine.state == 'terminated' and not isinstance(
                    machine.cloud, LibvirtCloud):
                raise NotFoundError("Machine %s has been terminated" %
                                    machine_uuid)
            # used by logging_view_decorator
            request.environ['machine_id'] = machine.machine_id
            request.environ['cloud_id'] = machine.cloud.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_uuid)

        cloud_id = machine.cloud.id
        auth_context.check_perm("cloud", "read", cloud_id)

    if machine.cloud.owner != auth_context.owner:
        raise NotFoundError("Machine %s doesn't exist" % machine.id)

    auth_context.check_perm("machine", action, machine.id)

    actions = ('start', 'stop', 'reboot', 'destroy', 'resize', 'rename',
               'undefine', 'suspend', 'resume', 'remove')

    if action not in actions:
        raise BadRequestError("Action '%s' should be "
                              "one of %s" % (action, actions))
    if action == 'destroy':
        methods.destroy_machine(auth_context.owner, cloud_id,
                                machine.machine_id)
    elif action == 'remove':
        log.info('Removing machine %s in cloud %s' %
                 (machine.machine_id, cloud_id))

        if not machine.monitoring.hasmonitoring:
            machine.ctl.remove()
            # Schedule a UI update
            trigger_session_update(auth_context.owner, ['clouds'])
            return

        # if machine has monitoring, disable it. the way we disable depends on
        # whether this is a standalone io installation or not
        try:
            disable_monitoring(auth_context.owner,
                               cloud_id,
                               machine.machine_id,
                               no_ssh=True)
        except Exception as exc:
            log.warning(
                "Didn't manage to disable monitoring, maybe the "
                "machine never had monitoring enabled. Error: %r", exc)

        machine.ctl.remove()

        # Schedule a UI update
        trigger_session_update(auth_context.owner, ['clouds'])

    elif action in ('start', 'stop', 'reboot', 'undefine', 'suspend',
                    'resume'):
        getattr(machine.ctl, action)()
    elif action == 'rename':
        if not name:
            raise BadRequestError("You must give a name!")
        getattr(machine.ctl, action)(name)
    elif action == 'resize':
        kwargs = {}
        if memory:
            kwargs['memory'] = memory
        if cpus:
            kwargs['cpus'] = cpus
        if cpu_shares:
            kwargs['cpu_shares'] = cpu_shares
        if cpu_units:
            kwargs['cpu_units'] = cpu_units
        getattr(machine.ctl, action)(size_id, kwargs)

    # TODO: We shouldn't return list_machines, just OK. Save the API!
    return methods.filter_list_machines(auth_context, cloud_id)
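
A minimal client-side sketch of calling this endpoint follows. The base URL, route and Authorization header are assumptions for illustration; only the payload fields (action, name, size) come from the docstring above.

# Hypothetical client call for the machine-actions endpoint above.
# API_URL, the route and the Authorization header are assumptions.
import requests

API_URL = 'https://mist.example.com/api/v1'  # hypothetical base URL
TOKEN = 'replace-with-your-api-token'

def call_machine_action(machine_uuid, action, **extra):
    # extra carries the action-specific params documented above,
    # e.g. name= for rename or size= for resize.
    payload = dict(action=action, **extra)
    resp = requests.post('%s/machines/%s' % (API_URL, machine_uuid),
                         headers={'Authorization': TOKEN},
                         json=payload)
    resp.raise_for_status()
    return resp.json()

# e.g. call_machine_action('3462b4dfbb434986a7dac362789bc402',
#                          'rename', name='web-1')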
Example #2
def create_machine(request):
    """
    Create machine(s) on cloud
    Creates one or more machines on the specified cloud. If async is true, a
    jobId will be returned.
    READ permission required on cloud.
    CREATE_RESOURCES permission required on cloud.
    CREATE permission required on machine.
    RUN permission required on script.
    READ permission required on key.

    ---
    cloud:
      in: path
      required: true
      type: string
    async:
      description: ' Create machines asynchronously, returning a jobId'
      type: boolean
    quantity:
      description: ' The number of machines that will be created, async only'
      type: integer
    azure_port_bindings:
      type: string
    cloud_id:
      description: The Cloud ID
      required: true
      type: string
    disk:
      description: ' Only required by Linode cloud'
      type: string
    docker_command:
      type: string
    docker_env:
      items:
        type: string
      type: array
    docker_exposed_ports:
      type: object
    docker_port_bindings:
      type: object
    hostname:
      type: string
    image_extra:
      description: ' Needed only by Linode cloud'
      type: string
    image:
      description: ' Id of image to be used with the creation'
      required: true
      type: string
    image_name:
      type: string
    ips:
      type: string
    job_id:
      type: string
    key_id:
      description: ' Associate machine with this key_id'
      required: true
      type: string
    location_id:
      description: ' Id of the cloud''s location to create the machine'
      required: true
      type: string
    location_name:
      type: string
    machine_name:
      required: true
      type: string
    monitoring:
      type: string
    networks:
      items:
        type: string
      type: array
    plugins:
      items:
        type: string
      type: array
    post_script_id:
      type: string
    post_script_params:
      type: string
    script:
      type: string
    script_id:
      type: string
    script_params:
      type: string
    size_id:
      description: ' Id of the size of the machine'
      required: true
      type: string
    size_name:
      type: string
    ssh_port:
      type: integer
    softlayer_backend_vlan_id:
      description: 'Specify id of a backend(private) vlan'
      type: integer
    project_id:
      description: ' Needed only by Packet.net cloud'
      type: string
    billing:
      description: ' Needed only by SoftLayer cloud'
      type: string
    bare_metal:
      description: ' Needed only by SoftLayer cloud'
      type: string
    schedule:
      type: dict
    """

    params = params_from_request(request)
    cloud_id = request.matchdict['cloud']

    for key in ('name', 'size'):
        if key not in params:
            raise RequiredParameterMissingError(key)

    key_id = params.get('key')
    machine_name = params['name']
    location_id = params.get('location', None)
    image_id = params.get('image')
    if not image_id:
        raise RequiredParameterMissingError("image")
    # this is used in libvirt
    disk_size = int(params.get('libvirt_disk_size', 4))
    disk_path = params.get('libvirt_disk_path', '')
    size_id = params['size']
    # deploy_script received as unicode, but ScriptDeployment wants str
    script = str(params.get('script', ''))
    # these are required only for Linode/GCE, passing them anyway
    image_extra = params.get('image_extra', None)
    disk = params.get('disk', None)
    image_name = params.get('image_name', None)
    size_name = params.get('size_name', None)
    location_name = params.get('location_name', None)
    ips = params.get('ips', None)
    monitoring = params.get('monitoring', False)
    networks = params.get('networks', [])
    docker_env = params.get('docker_env', [])
    docker_command = params.get('docker_command', None)
    script_id = params.get('script_id', '')
    script_params = params.get('script_params', '')
    post_script_id = params.get('post_script_id', '')
    post_script_params = params.get('post_script_params', '')
    async = params.get('async', False)  # 'async' is reserved in Python 3.7+
    quantity = params.get('quantity', 1)
    persist = params.get('persist', False)
    docker_port_bindings = params.get('docker_port_bindings', {})
    docker_exposed_ports = params.get('docker_exposed_ports', {})
    azure_port_bindings = params.get('azure_port_bindings', '')
    # hostname: if provided it will be attempted to assign a DNS name
    hostname = params.get('hostname', '')
    plugins = params.get('plugins')
    cloud_init = params.get('cloud_init', '')
    associate_floating_ip = params.get('associate_floating_ip', False)
    associate_floating_ip_subnet = params.get('attach_floating_ip_subnet',
                                              None)
    project_id = params.get('project', None)
    bare_metal = params.get('bare_metal', False)
    # bare_metal True creates a hardware server in SoftLayer,
    # while bare_metal False creates a virtual cloud server
    # hourly True is the default setting for SoftLayer hardware
    # servers, while False means the server has monthly pricing
    softlayer_backend_vlan_id = params.get('softlayer_backend_vlan_id', None)
    hourly = params.get('billing', True)
    job_id = params.get('job_id')
    # The `job` variable points to the event that started the job. If a job_id
    # is not provided, then it means that this is the beginning of a new story
    # that starts with a `create_machine` event. If a job_id is provided that
    # means that the current event will be part of already existing, unknown
    # story. TODO: Provide the `job` in the request's params or query it.
    if not job_id:
        job = 'create_machine'
        job_id = uuid.uuid4().hex
    else:
        job = None

    # these are needed for OnApp
    size_ram = params.get('size_ram', 256)
    size_cpu = params.get('size_cpu', 1)
    size_disk_primary = params.get('size_disk_primary', 5)
    size_disk_swap = params.get('size_disk_swap', 1)
    boot = params.get('boot', True)
    build = params.get('build', True)
    cpu_priority = params.get('cpu_priority', 1)
    cpu_sockets = params.get('cpu_sockets', 1)
    cpu_threads = params.get('cpu_threads', 1)
    port_speed = params.get('port_speed', 0)
    hypervisor_group_id = params.get('hypervisor_group_id')

    auth_context = auth_context_from_request(request)

    try:
        Cloud.objects.get(owner=auth_context.owner, id=cloud_id, deleted=None)
    except Cloud.DoesNotExist:
        raise NotFoundError('Cloud does not exist')

    # compose schedule as a dict from relative parameters
    if not params.get('schedule_type'):
        schedule = {}
    else:
        if params.get('schedule_type') not in [
                'crontab', 'interval', 'one_off'
        ]:
            raise BadRequestError('schedule type must be one of: '
                                  'crontab, interval, one_off')
        if params.get('schedule_entry') == {}:
            raise RequiredParameterMissingError('schedule_entry')

        schedule = {
            'name': params.get('name'),
            'description': params.get('description', ''),
            'action': params.get('action', ''),
            'script_id': params.get('schedule_script_id', ''),
            'schedule_type': params.get('schedule_type'),
            'schedule_entry': params.get('schedule_entry'),
            'expires': params.get('expires', ''),
            'start_after': params.get('start_after', ''),
            'max_run_count': params.get('max_run_count'),
            'task_enabled': bool(params.get('task_enabled', True)),
            'auth_context': auth_context.serialize(),
        }

    auth_context.check_perm("cloud", "read", cloud_id)
    auth_context.check_perm("cloud", "create_resources", cloud_id)
    tags = auth_context.check_perm("machine", "create", None) or {}
    if script_id:
        auth_context.check_perm("script", "run", script_id)
    if key_id:
        auth_context.check_perm("key", "read", key_id)

    # Parse tags.
    try:
        mtags = params.get('tags') or {}
        if not isinstance(mtags, dict):
            if not isinstance(mtags, list):
                raise ValueError()
            if not all(isinstance(t, dict) and len(t) == 1 for t in mtags):
                raise ValueError()
            mtags = {key: val for item in mtags for key, val in item.items()}
        tags.update(mtags)
    except ValueError:
        raise BadRequestError('Invalid tags format. Expecting either a '
                              'dictionary of tags or a list of single-item '
                              'dictionaries')

    args = (cloud_id, key_id, machine_name, location_id, image_id, size_id,
            image_extra, disk, image_name, size_name, location_name, ips,
            monitoring, networks, docker_env, docker_command)
    kwargs = {
        'script_id': script_id,
        'script_params': script_params,
        'script': script,
        'job': job,
        'job_id': job_id,
        'docker_port_bindings': docker_port_bindings,
        'docker_exposed_ports': docker_exposed_ports,
        'azure_port_bindings': azure_port_bindings,
        'hostname': hostname,
        'plugins': plugins,
        'post_script_id': post_script_id,
        'post_script_params': post_script_params,
        'disk_size': disk_size,
        'disk_path': disk_path,
        'cloud_init': cloud_init,
        'associate_floating_ip': associate_floating_ip,
        'associate_floating_ip_subnet': associate_floating_ip_subnet,
        'project_id': project_id,
        'bare_metal': bare_metal,
        'tags': tags,
        'hourly': hourly,
        'schedule': schedule,
        'softlayer_backend_vlan_id': softlayer_backend_vlan_id,
        'size_ram': size_ram,
        'size_cpu': size_cpu,
        'size_disk_primary': size_disk_primary,
        'size_disk_swap': size_disk_swap,
        'boot': boot,
        'build': build,
        'cpu_priority': cpu_priority,
        'cpu_sockets': cpu_sockets,
        'cpu_threads': cpu_threads,
        'port_speed': port_speed,
        'hypervisor_group_id': hypervisor_group_id
    }
    if not async:
        ret = methods.create_machine(auth_context.owner, *args, **kwargs)
    else:
        args = (auth_context.owner.id, ) + args
        kwargs.update({'quantity': quantity, 'persist': persist})
        tasks.create_machine_async.apply_async(args, kwargs, countdown=2)
        ret = {'job_id': job_id}
    ret.update({'job': job})
    return ret
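
The tag-parsing block above accepts either a dict of tags or a list of single-item dicts. The same normalization, factored into a standalone sketch:

def normalize_tags(mtags):
    # Mirrors the tag parsing above: accept {'env': 'prod'} as-is, or
    # flatten [{'env': 'prod'}, {'team': 'infra'}] into one dict.
    mtags = mtags or {}
    if not isinstance(mtags, dict):
        if not isinstance(mtags, list):
            raise ValueError('expected a dict or a list of dicts')
        if not all(isinstance(t, dict) and len(t) == 1 for t in mtags):
            raise ValueError('list items must be single-item dicts')
        mtags = {key: val for item in mtags for key, val in item.items()}
    return mtags

assert normalize_tags([{'env': 'prod'}, {'team': 'infra'}]) == \
    {'env': 'prod', 'team': 'infra'}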
Example #3
def create_machine(request):
    """
    Tags: machines
    ---
    Creates one or more machines on the specified cloud. If async is true, a
    jobId will be returned.
    READ permission required on cloud.
    CREATE_RESOURCES permission required on cloud.
    CREATE permission required on machine.
    RUN permission required on script.
    READ permission required on key.
    ---
    cloud:
      in: path
      required: true
      type: string
    name:
      type: string
      description: Name of the machine
      required: true
      example: "my-digital-ocean-machine"
    image:
      description: Provider's image id to be used on creation
      required: true
      type: string
      example: "17384153"
    size:
      type: string
      description: Provider's size id to be used on creation
      example: "512mb"
    location:
      type: string
      description: Mist internal location id
      example: "3462b4dfbb434986a7dac362789bc402"
    key:
      description: Associate machine with this key. Mist internal key id
      type: string
      example: "da1df7d0402043b9a9c786b100992888"
    monitoring:
      type: boolean
      description: Enable monitoring on the machine
      example: false
    async:
      description: Create machine asynchronously, returning a jobId
      type: boolean
      example: false
    cloud_init:
      description: Cloud Init script
      type: string
    networks:
      type: array
      items:
        type: string
    subnet_id:
      type: string
      description: Optional for EC2
    subnetwork:
      type: string
    schedule:
      type: object
    script:
      type: string
    script_id:
      type: string
      example: "e7ac65fb4b23453486778585616b2bb8"
    script_params:
      type: string
    plugins:
      type: array
      items:
        type: string
    post_script_id:
      type: string
    post_script_params:
      type: string
    associate_floating_ip:
      type: boolean
      description: Required for Openstack. Either 'true' or 'false'
    azure_port_bindings:
      type: string
      description: Required for Azure
    create_network:
      type: boolean
      description: Required for Azure_arm
    create_resource_group:
      type: boolean
      description: Required for Azure_arm
    create_storage_account:
      type: boolean
      description: Required for Azure_arm
    ex_storage_account:
      type: string
      description: Required for Azure_arm if not create_storage_account
    ex_resource_group:
      type: string
      description: Required for Azure_arm if not create_resource_group
    machine_password:
      type: string
      description: Required for Azure_arm
    machine_username:
      type: string
      description: Required for Azure_arm
    new_network:
      type: string
      description: Required for Azure_arm if create_storage_account
    new_storage_account:
      type: string
      description: Required for Azure_arm if create_storage_account
    new_resource_group:
      type: string
      description: Required for Azure_arm if create_resource_group
    bare_metal:
      description: Needed only by SoftLayer cloud
      type: boolean
    billing:
      description: Needed only by SoftLayer cloud
      type: string
      example: "hourly"
    boot:
      description: Required for OnApp
      type: boolean
    build:
      description: Required for OnApp
      type: boolean
    docker_command:
      type: string
    docker_env:
      type: array
      items:
        type: string
    docker_exposed_ports:
      type: object
    docker_port_bindings:
      type: object
    project_id:
      description: ' Needed only by Packet cloud'
      type: string
    softlayer_backend_vlan_id:
      description: 'Specify id of a backend(private) vlan'
      type: integer
    ssh_port:
      type: integer
      example: 22
    """

    params = params_from_request(request)
    cloud_id = request.matchdict['cloud']
    for key in ('name', 'size'):
        if key not in params:
            raise RequiredParameterMissingError(key)

    key_id = params.get('key')
    machine_name = params['name']
    location_id = params.get('location', None)
    image_id = params.get('image')
    if not image_id:
        raise RequiredParameterMissingError("image")
    # this is used in libvirt
    disk_size = int(params.get('libvirt_disk_size', 4))
    disk_path = params.get('libvirt_disk_path', '')
    size = params.get('size', None)
    # deploy_script received as unicode, but ScriptDeployment wants str
    script = str(params.get('script', ''))
    # these are required only for Linode/GCE, passing them anyway
    image_extra = params.get('image_extra', None)
    disk = params.get('disk', None)
    image_name = params.get('image_name', None)
    size_name = params.get('size_name', None)
    location_name = params.get('location_name', None)
    ips = params.get('ips', None)
    monitoring = params.get('monitoring', False)
    create_storage_account = params.get('create_storage_account', False)
    new_storage_account = params.get('new_storage_account', '')
    ex_storage_account = params.get('ex_storage_account', '')
    machine_password = params.get('machine_password', '')
    machine_username = params.get('machine_username', '')
    create_resource_group = params.get('create_resource_group', False)
    new_resource_group = params.get('new_resource_group', '')
    ex_resource_group = params.get('ex_resource_group', '')
    create_network = params.get('create_network', False)
    new_network = params.get('new_network', '')
    networks = params.get('networks', [])
    subnet_id = params.get('subnet_id', '')
    subnetwork = params.get('subnetwork', None)
    docker_env = params.get('docker_env', [])
    docker_command = params.get('docker_command', None)
    script_id = params.get('script_id', '')
    script_params = params.get('script_params', '')
    post_script_id = params.get('post_script_id', '')
    post_script_params = params.get('post_script_params', '')
    async = params.get('async', False)  # 'async' is reserved in Python 3.7+
    quantity = params.get('quantity', 1)
    persist = params.get('persist', False)
    docker_port_bindings = params.get('docker_port_bindings', {})
    docker_exposed_ports = params.get('docker_exposed_ports', {})
    azure_port_bindings = params.get('azure_port_bindings', '')
    # hostname: if provided it will be attempted to assign a DNS name
    hostname = params.get('hostname', '')
    plugins = params.get('plugins')
    cloud_init = params.get('cloud_init', '')
    associate_floating_ip = params.get('associate_floating_ip', False)
    associate_floating_ip_subnet = params.get('attach_floating_ip_subnet',
                                              None)
    project_id = params.get('project', None)
    bare_metal = params.get('bare_metal', False)
    # bare_metal True creates a hardware server in SoftLayer,
    # while bare_metal False creates a virtual cloud server
    # hourly True is the default setting for SoftLayer hardware
    # servers, while False means the server has monthly pricing
    softlayer_backend_vlan_id = params.get('softlayer_backend_vlan_id', None)
    hourly = params.get('hourly', True)

    job_id = params.get('job_id')
    # The `job` variable points to the event that started the job. If a job_id
    # is not provided, then it means that this is the beginning of a new story
    # that starts with a `create_machine` event. If a job_id is provided that
    # means that the current event will be part of already existing, unknown
    # story. TODO: Provide the `job` in the request's params or query it.
    if not job_id:
        job = 'create_machine'
        job_id = uuid.uuid4().hex
    else:
        job = None

    auth_context = auth_context_from_request(request)

    try:
        cloud = Cloud.objects.get(owner=auth_context.owner,
                                  id=cloud_id,
                                  deleted=None)
    except Cloud.DoesNotExist:
        raise NotFoundError('Cloud does not exist')

    # FIXME For backwards compatibility.
    if cloud.ctl.provider in (
            'vsphere',
            'onapp',
            'libvirt',
    ):
        if not size or not isinstance(size, dict):
            size = {}
        for param in (
                'size_ram',
                'size_cpu',
                'size_disk_primary',
                'size_disk_swap',
                'boot',
                'build',
                'cpu_priority',
                'cpu_sockets',
                'cpu_threads',
                'port_speed',
                'hypervisor_group_id',
        ):
            if param in params and params[param]:
                size[param.replace('size_', '')] = params[param]

    # compose schedule as a dict from relative parameters
    if not params.get('schedule_type'):
        schedule = {}
    else:
        if params.get('schedule_type') not in [
                'crontab', 'interval', 'one_off'
        ]:
            raise BadRequestError('schedule type must be one of: '
                                  'crontab, interval, one_off')
        if params.get('schedule_entry') == {}:
            raise RequiredParameterMissingError('schedule_entry')

        schedule = {
            'name': params.get('name'),
            'description': params.get('description', ''),
            'action': params.get('action', ''),
            'script_id': params.get('schedule_script_id', ''),
            'schedule_type': params.get('schedule_type'),
            'schedule_entry': params.get('schedule_entry'),
            'expires': params.get('expires', ''),
            'start_after': params.get('start_after', ''),
            'max_run_count': params.get('max_run_count'),
            'task_enabled': bool(params.get('task_enabled', True)),
            'auth_context': auth_context.serialize(),
        }

    auth_context.check_perm("cloud", "read", cloud_id)
    auth_context.check_perm("cloud", "create_resources", cloud_id)
    tags = auth_context.check_perm("machine", "create", None) or {}
    if script_id:
        auth_context.check_perm("script", "run", script_id)
    if key_id:
        auth_context.check_perm("key", "read", key_id)

    # Parse tags.
    try:
        mtags = params.get('tags') or {}
        if not isinstance(mtags, dict):
            if not isinstance(mtags, list):
                raise ValueError()
            if not all(isinstance(t, dict) and len(t) == 1 for t in mtags):
                raise ValueError()
            mtags = {key: val for item in mtags for key, val in item.items()}
        tags.update(mtags)
    except ValueError:
        raise BadRequestError('Invalid tags format. Expecting either a '
                              'dictionary of tags or a list of single-item '
                              'dictionaries')

    args = (cloud_id, key_id, machine_name, location_id, image_id, size,
            image_extra, disk, image_name, size_name, location_name, ips,
            monitoring, ex_storage_account, machine_password,
            ex_resource_group, networks, subnetwork, docker_env,
            docker_command)
    kwargs = {
        'script_id': script_id,
        'script_params': script_params,
        'script': script,
        'job': job,
        'job_id': job_id,
        'docker_port_bindings': docker_port_bindings,
        'docker_exposed_ports': docker_exposed_ports,
        'azure_port_bindings': azure_port_bindings,
        'hostname': hostname,
        'plugins': plugins,
        'post_script_id': post_script_id,
        'post_script_params': post_script_params,
        'disk_size': disk_size,
        'disk_path': disk_path,
        'cloud_init': cloud_init,
        'subnet_id': subnet_id,
        'associate_floating_ip': associate_floating_ip,
        'associate_floating_ip_subnet': associate_floating_ip_subnet,
        'project_id': project_id,
        'bare_metal': bare_metal,
        'tags': tags,
        'hourly': hourly,
        'schedule': schedule,
        'softlayer_backend_vlan_id': softlayer_backend_vlan_id,
        'create_storage_account': create_storage_account,
        'new_storage_account': new_storage_account,
        'create_network': create_network,
        'new_network': new_network,
        'create_resource_group': create_resource_group,
        'new_resource_group': new_resource_group,
        'machine_username': machine_username
    }
    if not async:
        ret = methods.create_machine(auth_context, *args, **kwargs)
    else:
        args = (auth_context.serialize(), ) + args
        kwargs.update({'quantity': quantity, 'persist': persist})
        tasks.create_machine_async.apply_async(args, kwargs, countdown=2)
        ret = {'job_id': job_id}
    ret.update({'job': job})
    return ret
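
For reference, an illustrative request body for this view. Field names and example values follow the docstring; everything else is invented:

payload = {
    'name': 'my-digital-ocean-machine',
    'image': '17384153',
    'size': '512mb',
    'location': '3462b4dfbb434986a7dac362789bc402',
    'key': 'da1df7d0402043b9a9c786b100992888',
    'monitoring': False,
    'async': True,   # respond immediately with {'job_id': ..., 'job': ...}
    'quantity': 2,   # async only
    'tags': [{'env': 'staging'}, {'team': 'infra'}],
}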
Example #4
def machine_actions(request):
    """
    Call an action on machine
    Calls a machine action on a cloud that supports it
    READ permission required on cloud.
    ACTION permission required on machine (ACTION can be START,
    STOP, DESTROY, REBOOT).
    ---
    machine:
      in: path
      required: true
      type: string
    action:
      enum:
      - start
      - stop
      - reboot
      - destroy
      - resize
      - rename
      required: true
      type: string
    name:
      description: The new name of the renamed machine
      type: string
    size:
      description: The size id of the plan to resize
      type: string
    """
    cloud_id = request.matchdict.get('cloud')
    params = params_from_request(request)
    action = params.get('action', '')
    plan_id = params.get('plan_id', '')
    name = params.get('name', '')
    auth_context = auth_context_from_request(request)

    if cloud_id:
        # this is deprecated, kept for backwards compatibility
        machine_id = request.matchdict['machine']
        auth_context.check_perm("cloud", "read", cloud_id)
        try:
            machine = Machine.objects.get(cloud=cloud_id,
                                          machine_id=machine_id,
                                          state__ne='terminated')
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_id)
    else:
        machine_uuid = request.matchdict['machine']
        try:
            machine = Machine.objects.get(id=machine_uuid,
                                          state__ne='terminated')
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_uuid)

        cloud_id = machine.cloud.id
        auth_context.check_perm("cloud", "read", cloud_id)

    if machine.cloud.owner != auth_context.owner:
        raise NotFoundError("Machine %s doesn't exist" % machine.id)

    auth_context.check_perm("machine", action, machine.id)

    actions = ('start', 'stop', 'reboot', 'destroy', 'resize', 'rename',
               'undefine', 'suspend', 'resume')

    if action not in actions:
        raise BadRequestError("Action '%s' should be "
                              "one of %s" % (action, actions))
    if action == 'destroy':
        methods.destroy_machine(auth_context.owner, cloud_id,
                                machine.machine_id)
    elif action in ('start', 'stop', 'reboot', 'undefine', 'suspend',
                    'resume'):
        getattr(machine.ctl, action)()
    elif action == 'rename':
        if not name:
            raise BadRequestError("You must give a name!")
        getattr(machine.ctl, action)(name)
    elif action == 'resize':
        getattr(machine.ctl, action)(plan_id)

    # TODO: We shouldn't return list_machines, just OK. Save the API!
    return methods.filter_list_machines(auth_context, cloud_id)
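
Both machine_actions views dispatch the validated action string onto the machine controller via getattr. The pattern in miniature, with a toy class standing in for machine.ctl:

class ToyController(object):
    # Stand-in for machine.ctl, just to show the dispatch pattern.
    def start(self):
        print('starting')

    def rename(self, name):
        print('renaming to %s' % name)

ctl = ToyController()
for action, args in [('start', ()), ('rename', ('web-1',))]:
    getattr(ctl, action)(*args)  # same as ctl.start(); ctl.rename('web-1')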
Example #5
def machine_rdp(request):
    """
    RDP file for Windows machines
    Generate and return an RDP file for Windows machines
    READ permission required on cloud.
    READ permission required on machine.
    ---
    cloud:
      in: path
      required: true
      type: string
    machine:
      in: path
      required: true
      type: string
    rdp_port:
      default: 3389
      in: query
      required: true
      type: integer
    host:
      in: query
      required: true
      type: string
    """
    cloud_id = request.matchdict.get('cloud')

    auth_context = auth_context_from_request(request)

    if cloud_id:
        # this is deprecated, kept for backwards compatibility
        machine_id = request.matchdict['machine']
        auth_context.check_perm("cloud", "read", cloud_id)
        try:
            machine = Machine.objects.get(cloud=cloud_id,
                                          machine_id=machine_id,
                                          state__ne='terminated')
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_id)
    else:
        machine_uuid = request.matchdict['machine']
        try:
            machine = Machine.objects.get(id=machine_uuid,
                                          state__ne='terminated')
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_uuid)

        cloud_id = machine.cloud.id
        auth_context.check_perm("cloud", "read", cloud_id)

    auth_context.check_perm("machine", "read", machine.id)
    rdp_port = request.params.get('rdp_port', 3389)
    host = request.params.get('host')

    if not host:
        raise BadRequestError('No hostname specified')
    try:
        if not 1 < int(rdp_port) < 65535:
            rdp_port = 3389
    except (ValueError, TypeError):
        rdp_port = 3389

    host, rdp_port = dnat(auth_context.owner, host, rdp_port)

    rdp_content = 'full address:s:%s:%s\nprompt for credentials:i:1' % \
                  (host, rdp_port)
    return Response(content_type='application/octet-stream',
                    content_disposition='attachment; filename="%s.rdp"' % host,
                    charset='utf8',
                    pragma='no-cache',
                    body=rdp_content)
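
The response body is plain text in .rdp format; for example, host 203.0.113.7 on the default port 3389 produces:

rdp_content = 'full address:s:%s:%s\nprompt for credentials:i:1' % \
              ('203.0.113.7', 3389)
print(rdp_content)
# full address:s:203.0.113.7:3389
# prompt for credentials:i:1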
Example #6
    def update(self, **kwargs):
        """Edit an existing Schedule"""

        if self.auth_context is not None:
            auth_context = self.auth_context
        else:
            raise MistError("You are not authorized to update schedule")

        owner = auth_context.owner

        if kwargs.get('action'):
            if kwargs.get('action') not in [
                    'reboot', 'destroy', 'notify', 'start', 'stop'
            ]:
                raise BadRequestError("Action is not correct")

        script_id = kwargs.pop('script_id', '')
        if script_id:
            try:
                Script.objects.get(owner=owner, id=script_id, deleted=None)
            except me.DoesNotExist:
                raise ScriptNotFoundError('Script with id %s does not '
                                          'exist' % script_id)
            # SEC require permission RUN on script
            auth_context.check_perm('script', 'run', script_id)

        # for ui compatibility
        if kwargs.get('expires') == '':
            kwargs['expires'] = None
        if kwargs.get('max_run_count') == '':
            kwargs['max_run_count'] = None
        if kwargs.get('start_after') == '':
            kwargs['start_after'] = None
        # transform string to datetime
        if kwargs.get('expires'):
            try:
                if isinstance(kwargs['expires'], int):
                    if kwargs['expires'] > 5000000000:  # Timestamp in millis
                        kwargs['expires'] = kwargs['expires'] / 1000
                    kwargs['expires'] = datetime.datetime.fromtimestamp(
                        kwargs['expires'])
                else:
                    kwargs['expires'] = datetime.datetime.strptime(
                        kwargs['expires'], '%Y-%m-%d %H:%M:%S')
            except (ValueError, TypeError):
                raise BadRequestError('Expiration date value was not valid')

        if kwargs.get('start_after'):
            try:
                if isinstance(kwargs['start_after'], int):
                    if kwargs['start_after'] > 5000000000:  # Timestamp in ms
                        kwargs['start_after'] = kwargs['start_after'] / 1000
                    kwargs['start_after'] = datetime.datetime.fromtimestamp(
                        kwargs['start_after'])
                else:
                    kwargs['start_after'] = datetime.datetime.strptime(
                        kwargs['start_after'], '%Y-%m-%d %H:%M:%S')
            except (ValueError, TypeError):
                raise BadRequestError('Start-after date value was not valid')

        now = datetime.datetime.now()
        if self.schedule.expires and self.schedule.expires < now:
            raise BadRequestError('Date of future task is in the past. '
                                  'Please contact Marty McFly')
        if self.schedule.start_after and self.schedule.start_after < now:
            raise BadRequestError('Date of future task is in the past. '
                                  'Please contact Marty McFly')
        # Schedule selectors pre-parsing.
        try:
            self._update__preparse_machines(auth_context, kwargs)
        except MistError as exc:
            log.error("Error while updating schedule %s: %r", self.schedule.id,
                      exc)
            raise
        except Exception as exc:
            log.exception("Error while preparsing kwargs on update %s",
                          self.schedule.id)
            raise InternalServerError(exc=exc)

        action = kwargs.pop('action', '')
        if action:
            self.schedule.task_type = schedules.ActionTask(action=action)
        elif script_id:
            self.schedule.task_type = schedules.ScriptTask(script_id=script_id,
                                                           params=kwargs.pop(
                                                               'params', ''))

        schedule_type = kwargs.pop('schedule_type', '')

        if (schedule_type == 'crontab'
                or isinstance(self.schedule.schedule_type, schedules.Crontab)):
            schedule_entry = kwargs.pop('schedule_entry', {})

            if schedule_entry:
                for k in schedule_entry:
                    if k not in [
                            'minute', 'hour', 'day_of_week', 'day_of_month',
                            'month_of_year'
                    ]:
                        raise BadRequestError("Invalid key given: %s" % k)

                self.schedule.schedule_type = schedules.Crontab(
                    **schedule_entry)

        elif (schedule_type == 'interval'
              or type(self.schedule.schedule_type) == schedules.Interval):
            schedule_entry = kwargs.pop('schedule_entry', {})

            if schedule_entry:
                for k in schedule_entry:
                    if k not in ['period', 'every']:
                        raise BadRequestError("Invalid key given: %s" % k)

                self.schedule.schedule_type = schedules.Interval(
                    **schedule_entry)

        elif (schedule_type in ['one_off', 'reminder']
              or type(self.schedule.schedule_type) == schedules.OneOff):
            # implements Interval under the hood
            future_date = kwargs.pop('schedule_entry', '')

            if future_date:
                try:
                    if isinstance(future_date, int):
                        if future_date > 5000000000:  # Timestamp is in millis
                            future_date = future_date / 1000
                        future_date = datetime.datetime.fromtimestamp(
                            future_date)
                    else:
                        future_date = datetime.datetime.strptime(
                            future_date, '%Y-%m-%d %H:%M:%S')
                except (ValueError, TypeError):
                    raise BadRequestError('Date value was not valid')

                if future_date < now:
                    raise BadRequestError(
                        'Date of future task is in the past. '
                        'Please contact Marty McFly')

                delta = future_date - now
                notify_msg = kwargs.get('notify_msg', '')

                if schedule_type == 'reminder':
                    self.schedule.schedule_type = schedules.Reminder(
                        period='seconds',
                        every=int(delta.total_seconds()),
                        entry=future_date,
                        message=notify_msg)
                else:
                    self.schedule.schedule_type = schedules.OneOff(
                        period='seconds',
                        every=int(delta.total_seconds()),
                        entry=future_date)
                self.schedule.max_run_count = 1

                notify = kwargs.pop('notify', 0)
                if notify:
                    _delta = datetime.timedelta(0, notify)
                    notify_at = future_date - _delta
                    notify_at = notify_at.strftime('%Y-%m-%d %H:%M:%S')
                    params = {
                        'action': 'notify',
                        'schedule_type': 'reminder',
                        'description': 'Machine expiration reminder',
                        'task_enabled': True,
                        'schedule_entry': notify_at,
                        'selectors': kwargs.get('selectors'),
                        'notify_msg': notify_msg
                    }
                    name = self.schedule.name + '-reminder'
                    if self.schedule.reminder:
                        self.schedule.reminder.delete()
                    from mist.api.schedules.models import Schedule
                    self.schedule.reminder = Schedule.add(
                        auth_context, name, **params)

        # set schedule attributes
        try:
            kwargs.pop('selectors')
        except KeyError:
            pass
        for key, value in kwargs.items():
            if key in self.schedule._fields:
                setattr(self.schedule, key, value)

        try:
            self.schedule.save()
        except me.ValidationError as e:
            log.error("Error updating %s: %s", self.schedule.name, e.to_dict())
            raise BadRequestError({"msg": str(e), "errors": e.to_dict()})
        except me.NotUniqueError as exc:
            log.error("Schedule %s not unique error: %s", self.schedule, exc)
            raise ScheduleNameExistsError()
        except me.OperationError:
            raise ScheduleOperationError()
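
The same timestamp heuristic (integers above 5000000000 are treated as milliseconds) appears three times in update. A factored-out sketch, assuming the same accepted formats:

import datetime

def parse_schedule_date(value):
    # Accept an int timestamp (seconds or millis) or a
    # '%Y-%m-%d %H:%M:%S' string, as in the blocks above.
    if isinstance(value, int):
        if value > 5000000000:  # timestamp is in milliseconds
            value = value / 1000
        return datetime.datetime.fromtimestamp(value)
    return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')

# Both of these denote the same instant:
assert parse_schedule_date(1700000000) == parse_schedule_date(1700000000000)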
Example #7
    def add(self, fail_on_invalid_params=False, **kwargs):
        """Add an entry to the database

        This is only to be called by `Script.add` classmethod to create
        a script. Fields `owner` and `name` are already populated in
        `self.script`. The `self.script` is not yet saved.
        """

        import mist.api.scripts.models as scripts

        # set description
        self.script.description = kwargs.pop('description', '')

        # set location
        location_type = kwargs.pop('location_type')
        if location_type not in ['inline', 'url', 'github']:
            raise BadRequestError('location type must be one of these: '
                                  '(inline, github, url)')

        entrypoint = kwargs.pop('entrypoint', '')

        if location_type == 'inline':
            script_entry = kwargs.pop('script', '')
            self.script.location = scripts.InlineLocation(
                source_code=script_entry)
        elif location_type == 'github':
            script_entry = kwargs.pop('script', '')
            self.script.location = scripts.GithubLocation(
                repo=script_entry, entrypoint=entrypoint)
        elif location_type == 'url':
            script_entry = kwargs.pop('script', '')
            self.script.location = scripts.UrlLocation(url=script_entry,
                                                       entrypoint=entrypoint)
        else:
            raise BadRequestError("Param 'location_type' must be in "
                                  "('url', 'github', 'inline').")

        # specific check
        self._preparse_file()

        errors = {}
        for key in list(kwargs.keys()):
            if key not in self.script._script_specific_fields:
                error = "Invalid parameter %s=%r." % (key, kwargs[key])
                if fail_on_invalid_params:
                    errors[key] = error
                else:
                    log.warning(error)
                    kwargs.pop(key)

        if errors:
            log.error("Error adding %s: %s", self.script, errors)
            raise BadRequestError({
                'msg':
                "Invalid parameters %s." % list(errors.keys()),
                'errors':
                errors,
            })

        for key, value in kwargs.items():
            setattr(self.script, key, value)

        try:
            self.script.save()
        except me.ValidationError as exc:
            log.error("Error adding %s: %s", self.script.name, exc.to_dict())
            raise BadRequestError({'msg': str(exc), 'errors': exc.to_dict()})
        except me.NotUniqueError as exc:
            log.error("Script %s not unique error: %s", self.script.name, exc)
            raise ScriptNameExistsError()
        self.script.owner.mapper.update(self.script)
        log.info("Added script with name '%s'", self.script.name)
        trigger_session_update(self.script.owner, ['scripts'])
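
Each location_type pairs with a different meaning for the script value (source code, repo, or URL). Illustrative kwargs for the three cases, with invented values:

inline_kwargs = {'location_type': 'inline',
                 'script': '#!/bin/bash\necho hello'}
github_kwargs = {'location_type': 'github',
                 'script': 'https://github.com/owner/repo',
                 'entrypoint': 'setup.sh'}
url_kwargs = {'location_type': 'url',
              'script': 'https://example.com/scripts/setup.sh',
              'entrypoint': 'setup.sh'}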
Example #8
    def _update__preparse_machines(self, auth_context, kwargs):
        """Preparse machines arguments to `self.update`

        This is called by `self.update` when adding a new schedule,
        in order to apply pre processing to the given params. Any subclass
        that requires any special pre processing of the params passed to
        `self.update`, SHOULD override this method.

        Params:
        kwargs: A dict of the keyword arguments that will be set as attributes
            to the `Schedule` model instance stored in `self.schedule`.
            This method is expected to modify `kwargs` in place and set the
            specific field of each scheduler.

        Subclasses MAY override this method.

        """
        sel_cls = {
            'tags': TaggingSelector,
            'machines': GenericResourceSelector,
            'field': FieldSelector,
            'age': MachinesAgeSelector
        }

        if kwargs.get('selectors'):
            self.schedule.selectors = []
        for selector in kwargs.get('selectors', []):
            if selector.get('type') not in sel_cls:
                raise BadRequestError()
            if selector['type'] == 'field':
                if selector['field'] not in ('created', 'state',
                                             'cost__monthly'):
                    raise BadRequestError()
            sel = sel_cls[selector.get('type')]()
            sel.update(**selector)
            self.schedule.selectors.append(sel)

        action = kwargs.get('action')

        # check permissions
        check = False
        for selector in self.schedule.selectors:
            if selector.ctype == 'machines':
                for mid in selector.ids:
                    try:
                        machine = Machine.objects.get(id=mid,
                                                      state__ne='terminated')
                    except Machine.DoesNotExist:
                        raise NotFoundError(
                            'Machine %s does not exist or is '
                            'terminated' % mid)

                    # SEC require permission READ on cloud
                    auth_context.check_perm("cloud", "read", machine.cloud.id)

                    if action and action not in ['notify']:
                        # SEC require permission ACTION on machine
                        auth_context.check_perm("machine", action, mid)
                    else:
                        # SEC require permission RUN_SCRIPT on machine
                        auth_context.check_perm("machine", "run_script", mid)
                check = True
            elif selector.ctype == 'tags':
                if action and action not in ['notify']:
                    # SEC require permission ACTION on machine
                    auth_context.check_perm("machine", action, None)
                else:
                    # SEC require permission RUN_SCRIPT on machine
                    auth_context.check_perm("machine", "run_script", None)
                check = True
        if not check:
            raise BadRequestError("Specify at least machine ids or tags")

        return
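
An illustrative selectors payload for this pre-parsing. Only the 'type' and 'field' keys appear in the code above; the remaining keys are assumptions about the selector models:

kwargs = {
    'action': 'stop',
    'selectors': [
        # explicit machine ids (keys other than 'type' are assumed)
        {'type': 'machines', 'ids': ['0b7441b2e2244e22b66a44cbbf3e8ab0']},
        # or select by tag / by field value (assumed key names)
        {'type': 'tags', 'include': [{'env': 'staging'}]},
        {'type': 'field', 'field': 'state', 'value': 'running'},
    ],
}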
Example #9
def add_script(request):
    """
    Add script to user scripts
    ADD permission required on SCRIPT
    ---
    name:
      type: string
      required: true
    script:
      type: string
      required: false
    script_inline:
      type: string
      required: false
    script_github:
      type: string
      required: false
    script_url:
      type: string
      required: false
    location_type:
      type: string
      required: true
    entrypoint:
      type: string
    exec_type:
      type: string
      required: true
    description:
      type: string
    extra:
      type: dict
    """

    params = params_from_request(request)

    # SEC
    auth_context = auth_context_from_request(request)
    script_tags = auth_context.check_perm("script", "add", None)

    kwargs = {}

    for key in ('name', 'script', 'location_type', 'entrypoint', 'exec_type',
                'description', 'extra', 'script_inline', 'script_url',
                'script_github'):
        kwargs[key] = params.get(key)  # TODO maybe change this

    kwargs['script'] = choose_script_from_params(kwargs['location_type'],
                                                 kwargs['script'],
                                                 kwargs['script_inline'],
                                                 kwargs['script_url'],
                                                 kwargs['script_github'])
    for key in ('script_inline', 'script_url', 'script_github'):
        kwargs.pop(key)

    name = kwargs.pop('name')
    exec_type = kwargs.pop('exec_type')

    if exec_type == 'executable':
        script = ExecutableScript.add(auth_context.owner, name, **kwargs)
    elif exec_type == 'ansible':
        script = AnsibleScript.add(auth_context.owner, name, **kwargs)
    elif exec_type == 'collectd_python_plugin':
        script = CollectdScript.add(auth_context.owner, name, **kwargs)
    else:
        raise BadRequestError(
            "Param 'exec_type' must be in ('executable', 'ansible', "
            "'collectd_python_plugin').")

    if script_tags:
        add_tags_to_resource(auth_context.owner, script, script_tags.items())

    script = script.as_dict()

    if 'job_id' in params:
        script['job_id'] = params['job_id']

    return script
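
An illustrative parameter set for add_script using the inline location type (values invented):

params = {
    'name': 'install-docker',
    'location_type': 'inline',
    'exec_type': 'executable',
    'script_inline': '#!/bin/bash\ncurl -fsSL https://get.docker.com | sh',
    'description': 'Install docker on a fresh VM',
}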
Example #10
    def list_zones(self):
        """
        This is the public method to call when requesting all the DNS zones
        under a specific cloud.
        """

        # TODO: Adding here for circular dependency issue. Need to fix this.
        from mist.api.dns.models import Zone

        # Fetch zones from libcloud connection.
        pr_zones = self._list_zones__fetch_zones()

        zones = []
        new_zones = []
        for pr_zone in pr_zones:
            # FIXME: We are using the zone_id and owner instead of the
            # cloud_id to search for existing zones because providers
            # allow access to the same zone from multiple clouds so
            # we can end up adding the same zone many times under
            # different clouds.
            try:
                zones_q = Zone.objects(owner=self.cloud.owner,
                                       zone_id=pr_zone.id,
                                       deleted=None)
                for zone in zones_q:
                    if zone.cloud.ctl.provider == self.cloud.ctl.provider:
                        break
                else:
                    raise Zone.DoesNotExist
            except Zone.DoesNotExist:
                log.info("Zone: %s/domain: %s not in the database, creating.",
                         pr_zone.id, pr_zone.domain)
                zone = Zone(cloud=self.cloud,
                            owner=self.cloud.owner,
                            zone_id=pr_zone.id)
                new_zones.append(zone)
            zone.domain = pr_zone.domain
            zone.type = pr_zone.type
            zone.ttl = pr_zone.ttl
            zone.extra = pr_zone.extra
            try:
                zone.save()
            except me.ValidationError as exc:
                log.error("Error updating %s: %s", zone, exc.to_dict())
                raise BadRequestError({
                    'msg': exc.message,
                    'errors': exc.to_dict()
                })
            except me.NotUniqueError as exc:
                log.error("Zone %s not unique error: %s", zone, exc)
                raise ZoneExistsError()
            zones.append(zone)
        self.cloud.owner.mapper.update(new_zones)

        # Delete any zones in the DB that were not returned by the provider,
        # meaning they were deleted on the provider's side.
        Zone.objects(
            cloud=self.cloud, id__nin=[z.id for z in zones],
            deleted=None).update(set__deleted=datetime.datetime.utcnow())

        # Format zone information.
        return zones
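
The sync pattern used here (upsert every zone the provider reports, then soft-delete DB rows it no longer returns) in miniature, with plain dicts standing in for the MongoEngine models:

import datetime

def sync_zones(db_rows, provider_rows):
    # Toy version of the upsert-then-soft-delete sync above.
    by_id = {row['zone_id']: row for row in db_rows}
    seen = set()
    for pr in provider_rows:
        row = by_id.setdefault(pr['id'], {'zone_id': pr['id']})
        row.update(domain=pr['domain'], deleted=None)
        seen.add(pr['id'])
    for row in by_id.values():
        if row['zone_id'] not in seen and row.get('deleted') is None:
            row['deleted'] = datetime.datetime.utcnow()
    return sorted(by_id.values(), key=lambda r: r['zone_id'])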
Example #11
    def list_records(self, zone):
        """
        Public method to return a list of records under a specific zone.
        """
        # Fetch records from libcloud connection.
        pr_records = self._list_records__fetch_records(zone.zone_id)

        # TODO: Adding here for circular dependency issue. Need to fix this.
        from mist.api.dns.models import Record, RECORDS

        records = []
        new_records = []
        for pr_record in pr_records:
            if pr_record.type not in RECORDS:
                log.error("Unsupported record type '%s'", pr_record.type)
                continue
            dns_cls = RECORDS[pr_record.type]
            try:
                record = Record.objects.get(zone=zone,
                                            record_id=pr_record.id,
                                            deleted=None)
            except Record.DoesNotExist:
                log.info("Record: %s not in the database, creating.",
                         pr_record.id)
                record = dns_cls(record_id=pr_record.id, zone=zone)
                new_records.append(record)
            # We need to check if any of the information returned by the
            # provider is different than what we have in the DB
            record.name = pr_record.name or ""
            record.type = pr_record.type
            record.ttl = pr_record.ttl
            record.extra = pr_record.extra

            self._list_records__postparse_data(pr_record, record)
            try:
                record.save()
            except me.ValidationError as exc:
                log.error("Error updating %s: %s", record, exc.to_dict())
                raise BadRequestError({
                    'msg': exc.message,
                    'errors': exc.to_dict()
                })
            except me.NotUniqueError as exc:
                log.error("Record %s not unique error: %s", record, exc)
                raise RecordExistsError()
            # There's a chance that we have received duplicate records, e.g.
            # NS records from Route 53, so if we already have this record
            # keep only the latest copy.
            for rec in records:
                if rec.record_id == record.record_id:
                    records.remove(rec)
                    break
            records.append(record)
        self.cloud.owner.mapper.update(new_records)

        # Then delete any records that are in the DB for this zone but were
        # not returned by the list_records() method, meaning they were
        # deleted in the DNS provider.
        Record.objects(
            zone=zone, id__nin=[r.id for r in records],
            deleted=None).update(set__deleted=datetime.datetime.utcnow())

        # Format record information.
        return records
Example #12
def get_events(auth_context, owner_id='', user_id='', event_type='', action='',
               limit=0, start=0, stop=0, newest=True, error=None, **kwargs):
    """Fetch logged events.

    This generator yields a series of logs after querying Elasticsearch.

    The initial query is extended with additional terms based on the inputs
    provided. Also, extra filtering may be applied in order to perform RBAC
    on the fly given the permissions granted to the requesting User.

    All Elasticsearch indices are in the form of <app|ui>-logs-<date>.

    """
    # Restrict access to UI logs to Admins only.
    is_admin = auth_context and auth_context.user.role == 'Admin'
    # Attempt to enforce owner_id in case of non-Admins.
    if not is_admin and not owner_id:
        owner_id = auth_context.owner.id if auth_context else None

    # Construct base Elasticsearch query.
    index = "%s-logs-*" % ("*" if is_admin else "app")
    query = {
        "query": {
            "bool": {
                "filter": {
                    "bool": {
                        "must": [
                            {
                                "range": {
                                    "@timestamp": {
                                        "gte": int(start * 1000),
                                        "lte": int(stop * 1000) or "now"
                                    }
                                }
                            }
                        ],
                        "must_not": []
                    }
                }
            }
        },
        "sort": [
            {
                "@timestamp": {
                    "order": ("desc" if newest else "asc")
                }
            }
        ],
        "size": (limit or 50)
    }
    # Match action.
    if action:
        query["query"]["bool"]["filter"]["bool"]["must"].append(
            {"term": {'action': action}}
        )
    # Fetch logs corresponding to the current Organization.
    if owner_id:
        query["query"]["bool"]["filter"]["bool"]["must"].append(
            {"term": {"owner_id": owner_id}}
        )
    # Match the user's ID, if provided.
    if user_id:
        query["query"]["bool"]["filter"]["bool"]["must"].append(
            {"term": {"user_id": user_id}}
        )
    # Specify whether to fetch stories that ended with an error.
    if error:
        query["query"]["bool"]["filter"]["bool"]["must_not"].append(
            {"term": {"error": False}}
        )
    elif error is False:
        query["query"]["bool"]["filter"]["bool"]["must"].append(
            {"term": {"error": False}}
        )
    # Perform a complex "Query String" Query that may span fields.
    if 'filter' in kwargs:
        f = kwargs.pop('filter')
        query_string = {
            'query': f,
            'analyze_wildcard': True,
            'default_operator': 'and',
            'allow_leading_wildcard': False
        }
        query["query"]["bool"]["filter"]["bool"]["must"].append({
            'query_string': query_string
        })

    # Extend query with additional kwargs.
    for key, value in kwargs.iteritems():
        query["query"]["bool"]["filter"]["bool"]["must"].append(
            {"term": {key: value}}
        )

    # Apply RBAC for non-Owners.
    if auth_context and not auth_context.is_owner():
        filter_logs(auth_context, query)

    # Query Elasticsearch.
    try:
        result = es().search(index=index, doc_type=event_type, body=query)
    except eexc.NotFoundError as err:
        log.error('Error %s during ES query: %s', err.status_code, err.info)
        raise NotFoundError(err.error)
    except (eexc.RequestError, eexc.TransportError) as err:
        log.error('Error %s during ES query: %s', err.status_code, err.info)
        raise BadRequestError(err.error)
    except (eexc.ConnectionError, eexc.ConnectionTimeout) as err:
        log.error('Error %s during ES query: %s', err.status_code, err.info)
        raise ServiceUnavailableError(err.error)

    for hit in result['hits']['hits']:
        event = hit['_source']
        if not event.get('action'):
            log.error('Skipped event %s, missing action', event['log_id'])
            continue
        try:
            extra = json.loads(event.pop('extra'))
        except Exception as exc:
            log.error('Failed to parse extra of event %s [%s]: '
                      '%s', event['log_id'], event['action'], exc)
        else:
            for key, value in extra.iteritems():
                event[key] = value
        yield event
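
For a call like get_events(auth_context, action='create_machine', limit=10), the query body sent to Elasticsearch looks roughly like this (owner id invented; start and stop left at 0):

query = {
    'query': {'bool': {'filter': {'bool': {
        'must': [
            {'range': {'@timestamp': {'gte': 0, 'lte': 'now'}}},
            {'term': {'action': 'create_machine'}},
            {'term': {'owner_id': 'd2f8a9c4b1e04f10a6b3c7d8e9f01234'}},
        ],
        'must_not': [],
    }}}},
    'sort': [{'@timestamp': {'order': 'desc'}}],
    'size': 10,
}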