Example #1
 def _check_related_objects(self):
     """Checks that Snapshot mapped only to Audits before deletion"""
     for obj in self.related_objects():
         if obj.type not in ("Audit", "Snapshot"):
             db.session.rollback()
             raise exceptions.Conflict(
                 description="Snapshot should be mapped "
                 "to Audit only before deletion")
         elif obj.type == "Snapshot":
             rel = relationship.Relationship
             related_originals = db.session.query(
                 rel.query.filter(
                     or_(
                         and_(rel.source_id == obj.child_id,
                              rel.source_type == obj.child_type,
                              rel.destination_id == self.child_id,
                              rel.destination_type == self.child_type),
                         and_(rel.destination_id == obj.child_id,
                              rel.destination_type == obj.child_type,
                              rel.source_id == self.child_id,
                              rel.source_type ==
                              self.child_type))).exists()).scalar()
             if related_originals:
                 db.session.rollback()
                 raise exceptions.Conflict(
                     description="Snapshot should be mapped to "
                     "Audit only before deletion")
Example #2
def add_user():
    """
    Add a new user to PINE, with the details provided in the JSON body of this request (id, email, and password).
    This method will calculate and store a password hash based upon the provided password.
    :return: Response
    """
    if not request.is_json:
        raise exceptions.BadRequest()
    body = request.get_json()

    # first check that the id and email are not already in use
    if not "id" in body or not body["id"]:
        raise exceptions.BadRequest(
            description="Missing id in body JSON data.")
    try:
        user = users.get_user(body["id"])
        if user is not None:
            raise exceptions.Conflict(
                description="User with id {} already exists.".format(
                    body["id"]))
    except exceptions.NotFound:
        pass
    if not "email" in body or not body["email"]:
        raise exceptions.BadRequest(
            description="Missing email in body JSON data.")
    user = users.get_user_by_email(body["email"])
    if user is not None:
        raise exceptions.Conflict(
            description="User with email {} already exists.".format(
                body["email"]))

    # replace the password with a hash
    if not "passwd" in body or not body["passwd"]:
        raise exceptions.BadRequest(
            description="Missing passwd in body JSON data.")
    body["passwdhash"] = password.hash_password(body["passwd"])
    del body["passwd"]

    body["_id"] = body["id"]
    del body["id"]

    # check other fields as required by eve schema
    if not "firstname" in body or not body[
            "firstname"] or "lastname" not in body or not body["lastname"]:
        raise exceptions.BadRequest(
            description="Missing firstname or lastname in body JSON data.")
    if not "roles" in body or not body["roles"]:
        raise exceptions.BadRequest(
            description="Missing/empty roles in body JSON data.")
    if "description" in body and body["description"] == None:
        del body["description"]

    # post to data server
    resp = service.post("users", json=body)
    return service.convert_response(resp)
Example #3
def start_stop_notebook(namespace, notebook, request_body):
    stop = request_body[STOP_ATTR]

    patch_body = {}
    if stop:
        if notebook_is_stopped(namespace, notebook):
            raise exceptions.Conflict("Notebook %s/%s is already stopped." %
                                      (namespace, notebook))

        log.info("Stopping Notebook Server '%s/%s'", namespace, notebook)
        now = dt.datetime.now(dt.timezone.utc)
        timestamp = now.strftime("%Y-%m-%dT%H:%M:%SZ")

        patch_body = {
            "metadata": {
                "annotations": {
                    status.STOP_ANNOTATION: timestamp
                }
            }
        }
    else:
        log.info("Starting Notebook Server '%s/%s'", namespace, notebook)
        patch_body = {
            "metadata": {
                "annotations": {
                    status.STOP_ANNOTATION: None
                }
            }
        }

    log.info("Sending PATCH to Notebook %s/%s: %s", namespace, notebook,
             patch_body)
    api.patch_notebook(notebook, namespace, patch_body)
Example #4
 def get_frontend_ip(self):
     with pyroute2.NetNS(constants.AMPHORA_NAMESPACE) as ns:
         addr = ns.get_addr(label=constants.NETNS_PRIMARY_INTERFACE)[0]
         for item in addr['attrs']:
             if 'IFA_ADDRESS' in item:
                 return item[1]
     raise exceptions.Conflict(description="Didn't get frontent ip.")
Example #5
def post(
    body: typing.Dict[str, typing.Any]
) -> typing.Tuple[typing.Dict[str, typing.Any], int]:
    LOG.info('Received request to create a new user')
    try:
        single_user = user.UserDB.query.filter_by(
            user_name=body['user_name']).one()
        if single_user:
            raise exceptions.Conflict(
                description='User by this name already exists in the system')
    except exc.NoResultFound:
        LOG.info('User doesn\'t exist, system will add it')

    new_user = user.UserDB(**body)
    new_user.id = str(uuid.uuid4())
    database.db.session.add(new_user)
    try:
        database.db.session.commit()
        LOG.info('Added new user')
    except Exception:
        LOG.exception('Failed to add user')
        database.db.session.rollback()
        raise exceptions.InternalServerError(
            description='Failed to add new user to the system')
    return user.User(
        id=str(new_user.id),
        userName=new_user.user_name,
        firstName=new_user.first_name,
        lastName=new_user.last_name,
        email=new_user.email,
        address=new_user.address,
        postalCode=new_user.postal_code,
    ).dict(), 201
Example #6
    def post(self, id, **kwargs):
        self._audit_before()

        entity = self.dao.retrieve(id)
        if entity:
            raise http_exception.Conflict(description='Entity already exists')
        else:
            raise exception.EntityNotFoundError(id)
Example #7
 def _check_no_assessments(self):
     """Check that audit has no assessments before delete."""
     if self.assessments:
         db.session.rollback()
         raise wzg_exceptions.Conflict(
             "The audit cannot be deleted because assessment(s) are mapped "
             "to it. Please delete the mapped assessment(s) first, then "
             "delete the audit.")
Example #8
 def upload_file(self, name, stream):
     blob_client = self.get_blob_client(name)
     try:
         blob_client.upload_blob(stream)
     except az_exc.ResourceExistsError as e:
         # A blob with this name already exists; surface it as HTTP 409.
         raise exceptions.Conflict() from e
     return name
Example #9
def create_user(first_name, last_name, email, password):
    """
    Creates a new user with the provided credentials, and returns a token.
    """

    if User.by_email(email):
        raise exceptions.Conflict(description='User already exists.')

    user = UserFactory.instance.create(first_name, last_name, email,
                                       password).save()
    token = generate_token_for_user(user)
    return jsonify(snake_to_camel_case_dict({'token': token.decode("utf-8")}))
Example #10
def delete_pvc(pvc, namespace):
    """
    Delete a PVC only if it is not used from any Pod
    """
    pods = common_utils.get_pods_using_pvc(pvc, namespace)
    if pods:
        pod_names = [p.metadata.name for p in pods]
        raise exceptions.Conflict("Cannot delete PVC '%s' because it is being"
                                  " used by pods: %s" % (pvc, pod_names))

    log.info("Deleting PVC %s/%s...", namespace, pvc)
    api.delete_pvc(pvc, namespace)
    log.info("Successfully deleted PVC %s/%s", namespace, pvc)

    return api.success_response("message",
                                "PVC %s successfully deleted." % pvc)
Example #11
def delete_pvc(pvc, namespace):
    """
    Delete a PVC, even if it is only mounted on PVCViewer Pods.
    Get list of PVCViewers that use the requested PVC. If no other Pods
    are using that PVC then delete the Viewer Pods as well as the PVC.
    """
    pods = common_utils.get_pods_using_pvc(pvc, namespace)
    non_viewer_pods = [p for p in pods if not rok_utils.is_viewer_pod(p)]
    if non_viewer_pods:
        pod_names = [p.metadata.name for p in non_viewer_pods]
        raise exceptions.Conflict("Cannot delete PVC '%s' because it is being"
                                  " used by pods: %s" % (pvc, pod_names))

    log.info("Deleting PVC %s/%s...", namespace, pvc)
    api.delete_pvc(pvc, namespace)
    log.info("Successfully deleted PVC %s/%s", namespace, pvc)

    return api.success_response("message",
                                "PVC %s successfully deleted." % pvc)
Example #12
def new_recording(path):
    recording_type = recording.get_recording_type(path)
    if not recording_type:
        raise error.ControlPlaneError('Unknown recording type')

    can_exist = flask.request.method == 'POST'

    try:
        directory, file = recording.get_recording(
            app.config['SNMPSIM_MGMT_DATAROOT'], path,
            not_exists=can_exist, ensure_path=True)

    except error.ControlPlaneError:
        raise exceptions.Conflict('Bad recording path (does it already exist?)')

    # TODO: is it a memory hog when .snmprec file is large?
    with tempfile.NamedTemporaryFile(dir=directory, delete=False) as fl:
        fl.write(flask.request.data)

    os.rename(fl.name, os.path.join(directory, file))

    return flask.Response(status=204)
Example #13
    def upload_lvs_listener_config(self, listener_id):
        stream = loadbalancer.Wrapped(flask.request.stream)
        NEED_CHECK = True

        if not os.path.exists(util.keepalived_lvs_dir()):
            os.makedirs(util.keepalived_lvs_dir())
        if not os.path.exists(util.keepalived_backend_check_script_dir()):
            current_file_dir, _ = os.path.split(os.path.abspath(__file__))

            try:
                script_dir = os.path.join(
                    os.path.abspath(os.path.join(current_file_dir, '../..')),
                    'utils')
                assert os.path.exists(script_dir)
                assert os.path.exists(
                    os.path.join(script_dir, CHECK_SCRIPT_NAME))
            except Exception as e:
                raise exceptions.Conflict(
                    description='%(file_name)s not found for '
                    'UDP Listener %(listener_id)s' % {
                        'file_name': CHECK_SCRIPT_NAME,
                        'listener_id': listener_id
                    }) from e
            os.makedirs(util.keepalived_backend_check_script_dir())
            shutil.copy2(os.path.join(script_dir, CHECK_SCRIPT_NAME),
                         util.keepalived_backend_check_script_path())
            os.chmod(util.keepalived_backend_check_script_path(), stat.S_IEXEC)
        # Based on current topology setting, only the amphora instances in
        # Active-Standby topology will create the directory below. So for
        # Single topology, it should not create the directory and the check
        # scripts for status change.
        if (CONF.controller_worker.loadbalancer_topology !=
                consts.TOPOLOGY_ACTIVE_STANDBY):
            NEED_CHECK = False

        conf_file = util.keepalived_lvs_cfg_path(listener_id)
        flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
        # mode 00644
        mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
        with os.fdopen(os.open(conf_file, flags, mode), 'wb') as f:
            b = stream.read(BUFFER)
            while b:
                f.write(b)
                b = stream.read(BUFFER)

        init_system = util.get_os_init_system()

        file_path = util.keepalived_lvs_init_path(init_system, listener_id)

        if init_system == consts.INIT_SYSTEMD:
            template = SYSTEMD_TEMPLATE

            # Render and install the network namespace systemd service
            util.install_netns_systemd_service()
            util.run_systemctl_command(consts.ENABLE,
                                       consts.AMP_NETNS_SVC_PREFIX)
        elif init_system == consts.INIT_UPSTART:
            template = UPSTART_TEMPLATE
        elif init_system == consts.INIT_SYSVINIT:
            template = SYSVINIT_TEMPLATE
        else:
            raise util.UnknownInitError()

        # Render and install the keepalivedlvs init script
        if init_system == consts.INIT_SYSTEMD:
            # mode 00644
            mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
        else:
            # mode 00755
            mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH
                    | stat.S_IXOTH)
        keepalived_pid, vrrp_pid, check_pid = util.keepalived_lvs_pids_path(
            listener_id)
        if not os.path.exists(file_path):
            with os.fdopen(os.open(file_path, flags, mode), 'w') as text_file:
                text = template.render(
                    keepalived_pid=keepalived_pid,
                    vrrp_pid=vrrp_pid,
                    check_pid=check_pid,
                    keepalived_cmd=consts.KEEPALIVED_CMD,
                    keepalived_cfg=util.keepalived_lvs_cfg_path(listener_id),
                    amphora_nsname=consts.AMPHORA_NAMESPACE,
                    amphora_netns=consts.AMP_NETNS_SVC_PREFIX,
                    administrative_log_facility=(
                        CONF.amphora_agent.administrative_log_facility),
                )
                text_file.write(text)

        # Make sure the keepalivedlvs service is enabled on boot
        if init_system == consts.INIT_SYSTEMD:
            util.run_systemctl_command(
                consts.ENABLE, "octavia-keepalivedlvs-%s" % str(listener_id))
        elif init_system == consts.INIT_SYSVINIT:
            init_enable_cmd = "insserv {file}".format(file=file_path)
            try:
                subprocess.check_output(init_enable_cmd.split(),
                                        stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError as e:
                LOG.debug(
                    'Failed to enable '
                    'octavia-keepalivedlvs service: '
                    '%(err)s', {'err': str(e)})
                return webob.Response(json=dict(
                    message="Error enabling "
                    "octavia-keepalivedlvs service",
                    details=e.output),
                                      status=500)

        if NEED_CHECK:
            # inject the check script for keepalived process
            script_path = os.path.join(util.keepalived_check_scripts_dir(),
                                       KEEPALIVED_CHECK_SCRIPT_NAME)
            if not os.path.exists(script_path):
                if not os.path.exists(util.keepalived_check_scripts_dir()):
                    os.makedirs(util.keepalived_check_scripts_dir())

                with os.fdopen(os.open(script_path, flags, stat.S_IEXEC),
                               'w') as script_file:
                    text = check_script_file_template.render(
                        consts=consts,
                        init_system=init_system,
                        keepalived_lvs_pid_dir=util.keepalived_lvs_dir())
                    script_file.write(text)
            util.vrrp_check_script_update(None, consts.AMP_ACTION_START)

        res = webob.Response(json={'message': 'OK'}, status=200)
        res.headers['ETag'] = stream.get_md5()
        return res
Example #14
 def _check_no_assessments(self):
   """Check that audit has no assessments before delete."""
   if self.assessments or self.assessment_templates:
     db.session.rollback()
     raise wzg_exceptions.Conflict(errors.MAPPED_ASSESSMENT)
Example #15
 def _check_no_audits(self):
   """Check that audit has no assessments before delete."""
   if self.audits:
     db.session.rollback()
     raise wzg_exceptions.Conflict(errors.MAPPED_AUDITS)
Example #16
def printjob_create(org_uuid):
    data = request.json
    if not data:
        return abort(make_response(jsonify(message="Missing payload"), 400))
    gcode_uuid = data.get("gcode", None)
    printer_uuid = data.get("printer",
                            None)  # FIXME: this should be part of the path
    if not gcode_uuid or not printer_uuid:
        return abort(
            make_response(
                jsonify(message="Missing gcode_uuid or printer_uuid"), 400))

    printer = printers.get_printer(printer_uuid)
    if not printer or printer['organization_uuid'] != org_uuid:
        raise http_exceptions.UnprocessableEntity(
            f"Invalid printer {printer_uuid} - does not exist.")

    gcode = gcodes.get_gcode(gcode_uuid)
    if not gcode:
        raise http_exceptions.UnprocessableEntity(
            "Invalid gcode {gcode_uuid} - does not exist.")

    network_client = network_clients.get_network_client(
        printer["network_client_uuid"])
    printer_data = dict(network_client)
    printer_data.update(dict(printer))
    printer_inst = clients.get_printer_instance(printer_data)
    try:
        printer_inst.upload_and_start_job(gcode["absolute_path"],
                                          gcode["path"])
    except DeviceInvalidState as e:
        raise http_exceptions.Conflict(*e.args)
    except DeviceCommunicationError as e:
        raise http_exceptions.GatewayTimeout(*e.args)
    # TODO: robin - add_printjob should be method of printer and printer a
    #               method of organization
    printjob_uuid = printjobs.add_printjob(
        gcode_uuid=gcode["uuid"],
        organization_uuid=org_uuid,
        printer_uuid=printer["uuid"],
        user_uuid=get_current_user()["uuid"],
        gcode_data={
            "uuid": gcode["uuid"],
            "filename": gcode["filename"],
            "size": gcode["size"],
            "available": True,
        },
        # FIXME: printer data should be kept in printer object only
        printer_data={
            "ip": printer_inst.ip,
            "port": printer_inst.port,
            "hostname": printer_inst.hostname,
            "name": printer_inst.name,
            "client": printer_inst.client,
        },
    )
    return (
        jsonify({
            "uuid": printjob_uuid,
            "user_uuid": get_current_user()["uuid"]
        }),
        201,
    )