def _block(brain_or_object):
    """Build the info mapping for a single catalog brain or content object.

    NOTE(review): `endpoint`, `complete` and `req` are read from the
    enclosing scope -- this looks like a closure inside a batching/search
    helper; confirm against the outer function.
    """
    # shortcut: the caller only asked for the children of each item
    if req.get("only_children"):
        return get_children(brain_or_object)
    # extract the data using the default info adapter
    info = IInfo(brain_or_object)()
    # might be None for mixed type catalog results, e.g. in the search route
    scoped_endpoint = endpoint
    if scoped_endpoint is None:
        scoped_endpoint = get_endpoint(get_portal_type(brain_or_object))
    info.update(get_url_info(brain_or_object, scoped_endpoint))
    # switch to wake up the object and complete the informations with the
    # data of the content adapter
    if complete:
        obj = get_object(brain_or_object)
        info.update(IInfo(obj)())
        info.update(get_parent_info(obj))
        # also include the children if the request asked for them
        if req.get_children():
            info.update(get_children(obj))
    return info
def copy_items(portal_type=None, uid=None, endpoint=None, **kw):
    """Copy a single object (identified by ``uid``) to the clipboard.

    :param portal_type: unused, kept for a consistent route signature
    :param uid: UID of the object to copy
    :param endpoint: unused, kept for a consistent route signature
    :returns: list with one info mapping of the copied object
    """
    # disable CSRF
    req.disable_csrf_protection()

    # try to find the requested objects
    objects = find_objects(uid=uid)

    # No objects could be found, bail out
    if not objects:
        fail(404, "No Objects could be found")

    # We support only to copy a single object
    if len(objects) > 1:
        fail(400, "Can only copy one object at a time")

    # We don't want to copy the portal object.
    # NOTE: the previous `if filter(...)` check is always truthy on
    # Python 3 (filter returns a lazy iterator there) -- use `any` instead
    if any(is_root(o) for o in objects):
        fail(400, "Can not copy the portal object")

    # copy the object to the clipboard of its parent folder
    obj = objects[0]
    request = req.getRequest()
    obj.aq_parent.manage_copyObjects(obj.getId(), REQUEST=request)
    # manage_copyObjects touches the response; restore the JSON content type
    request.response.setHeader("Content-Type", "application/json")

    info = IInfo(obj)()
    return [info]
def delete_items(portal_type=None, uid=None, endpoint=None, **kw):
    """ delete items

    1. If the uid is given, we can ignore the request body and delete the
       object with the given uid (if the uid was valid).
    2. If no uid is given, the user wants to delete more than one item.
       => go through each item and extract the uid. Delete it afterwards.
       // we should do this kind of transaction base. So if we can not get
       // an object for an uid, no item will be deleted.
    3. we could check if the portal_type matches, just to be sure the user
       wants to delete the right content.
    """
    # disable CSRF
    req.disable_csrf_protection()

    # try to find the requested objects
    objects = find_objects(uid=uid)

    # We don't want to delete the portal object.
    # NOTE: the previous `if filter(...)` check is always truthy on
    # Python 3 (filter returns a lazy iterator there) -- use `any` instead
    if any(is_root(o) for o in objects):
        fail(400, "Can not delete the portal object")

    # delete each object and record the outcome
    results = []
    for obj in objects:
        info = IInfo(obj)()
        info["deleted"] = delete_object(obj)
        results.append(info)

    if not results:
        fail(404, "No Objects could be found")

    return results
def paste_items(portal_type=None, request=None, uid=None, endpoint=None):
    """Paste the clipboard contents into the object located by ``uid``.

    :returns: list of info mappings for the newly pasted objects
    :raises APIError: 404 when the target can not be found, 400 when the
        clipboard is empty or more than one target was matched
    """
    # locate the paste target(s)
    targets = find_objects(uid=uid)

    # nothing found -> error out
    if not targets:
        raise APIError(404, "No Objects could be found")

    # the clipboard cookie must be present
    clipboard = req.get_cookie("__cp")
    if clipboard is None:
        raise APIError(400, "No data found to paste")

    # pasting is only supported into a single container
    if len(targets) > 1:
        raise APIError(400, "Can only paste to one location")

    container = targets[0]

    # paste the clipboard contents and collect the info of each new object
    out = []
    for record in container.manage_pasteObjects(clipboard):
        pasted = container.get(record.get("new_id"))
        if pasted:
            out.append(IInfo(pasted)())
    return out
def cut_items(portal_type=None, request=None, uid=None, endpoint=None):
    """Cut a single object (identified by ``uid``) to the clipboard.

    :param request: the current Zope request (used for the clipboard cookie)
    :param uid: UID of the object to cut
    :returns: list with one info mapping of the cut object
    :raises APIError: 404 when no object was found, 400 on invalid targets
    """
    # try to find the requested objects
    objects = find_objects(uid=uid)

    # No objects could be found, bail out
    if not objects:
        raise APIError(404, "No Objects could be found")

    # We support only to cut a single object
    if len(objects) > 1:
        raise APIError(400, "Can only cut one object at a time")

    # We don't want to cut the portal object.
    # NOTE: the previous `if filter(...)` check is always truthy on
    # Python 3 (filter returns a lazy iterator there) -- use `any` instead
    if any(is_root(o) for o in objects):
        raise APIError(400, "Can not cut the portal object")

    # cut the object
    obj = objects[0]
    obj.aq_parent.manage_cutObjects(obj.getId(), REQUEST=request)
    # manage_cutObjects touches the response; restore the JSON content type
    request.response.setHeader("Content-Type", "application/json")

    info = IInfo(obj)()
    return [info]
def delete_items(portal_type=None, request=None, uid=None, endpoint=None):
    """Delete one or many items.

    With a ``uid`` the request body is ignored and only that object is
    deleted.  Without a ``uid``, every record of the request body is
    resolved to an object and deleted individually.

    :returns: list of info mappings, each carrying a "deleted" flag
    :raises APIError: 400 when nothing could be deleted
    """
    # the payload records (only used when no uid was given)
    records = req.get_request_data()

    # uid given -> delete exactly that object
    obj = get_object_by_uid(uid)
    if obj:
        info = IInfo(obj)()
        info["deleted"] = delete_object(obj)
        return [info]

    # no uid -> resolve each record to an object and delete it
    results = []
    for record in records:
        target = get_object_by_record(record)
        # skip records we can not resolve to an object
        if target is None:
            continue
        data = IInfo(target)()
        data["deleted"] = delete_object(target)
        results.append(data)

    if not results:
        raise APIError(400, "No Objects could be deleted")
    return results
def get_children(obj):
    """Return the folder contents of this object.

    :param obj: a content object or catalog brain
    :returns: dict with a "children" key -- a list of child info mappings,
        or None when the object is not folderish
    """
    # ensure we have a full content object
    obj = get_object(obj)

    # non-folderish objects have no children.
    # NOTE: use truthiness instead of `is False` -- a possible falsy
    # non-False result (e.g. None) would otherwise fall through and crash
    # on `listFolderContents` below
    if not is_folderish(obj):
        return {"children": None}

    children = []
    for content in obj.listFolderContents():
        endpoint = get_endpoint(get_portal_type(content))
        child = {
            "uid": get_uid(content),
            "url": get_url(content),
            "api_url": url_for(endpoint, uid=get_uid(content)),
        }
        # merge the default info adapter data of the child
        child.update(IInfo(content)())
        children.append(child)

    return {"children": children}
def test_portal_adapter(self):
    # the portal root is expected to be mapped to the fixed uid 0
    data = IInfo(self.portal)()
    self.assertEqual(data.get("uid"), 0)
def test_at_adapter(self):
    document = self.get_document_obj()
    info = IInfo(document)()
    # the adapter data must carry the object's UID
    self.assertEqual(info.get("uid"), document.UID())
def test_brain_adapter(self):
    brain = self.get_document_brain()
    info = IInfo(brain)()
    # the adapter data must carry the brain's UID metadata
    self.assertEqual(info.get("uid"), brain.UID)
def get_info(brain_or_object, endpoint=None, complete=False):
    """Extract the data from the catalog brain or object

    :param brain_or_object: A single catalog brain or content object
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :param endpoint: The named URL endpoint for the root of the items
    :type endpoint: str/unicode
    :param complete: Flag to wake up the object and fetch all data
    :type complete: bool
    :returns: Data mapping for the object/catalog brain
    :rtype: dict
    """
    # base data set from the initial (possibly lazy) object
    data = IInfo(brain_or_object).to_dict()

    # URL and parent information are always included
    data.update(get_url_info(brain_or_object, endpoint))
    data.update(get_parent_info(brain_or_object))

    # without `?complete=yes` we are done -- no need to wake up the object
    if not complete:
        return data

    # wake up the object and merge the full adapter data set
    obj = get_object(brain_or_object)
    data.update(IInfo(obj).to_dict())

    # workflow data only on explicit request (requires `?complete=yes`)
    if req.get_workflow(False):
        data.update({"workflow": get_workflow_info(obj)})

    # sharing data only on explicit request (requires `?complete=yes`)
    if req.get_sharing(False):
        data.update({"sharing": get_sharing_info(obj)})

    return data