def read_obj(cls, uuid, obj_type=None):
    """Read a single object dict from cassandra by uuid.

    Uses *obj_type* when given, otherwise falls back to ``cls.obj_type``.
    Raises NoIdError when the backend read does not succeed.
    """
    read_type = obj_type or cls.obj_type
    success, read_result = cls._cassandra.object_read(read_type, [uuid])
    if success:
        return read_result[0]
    cls._logger.error(
        'Cannot read %s %s, error %s' % (obj_type, uuid, read_result))
    raise NoIdError(uuid)
def read_obj(cls, uuid, obj_type=None, fields=None):
    """Read a single object dict through the object-db backend.

    *fields* restricts which columns are read; ``None`` reads everything.
    Raises NoIdError when the backend read does not succeed.
    """
    read_type = obj_type or cls.obj_type
    success, read_result = cls._object_db.object_read(
        read_type, [uuid], field_names=fields)
    if success:
        return read_result[0]
    cls._logger.error('Cannot read %s %s, error %s' % (obj_type, uuid,
                                                       read_result))
    raise NoIdError(uuid)
def fq_name_to_uuid(self, obj_type, fq_name):
    """Resolve *fq_name* (list of name components) to an object uuid.

    Looks up the fq-name index column family; column names have the
    shape '<encoded-fq-name>:<uuid>'.  Raises NoIdError when no match
    exists.
    """
    row_key = obj_type.replace('-', '_')
    encoded = utils.encode_string(':'.join(fq_name))
    try:
        # ':' .. ';' brackets exactly the columns for this fq-name
        matches = list(self._obj_fq_name_cf.xget(
            row_key, column_start='%s:' % (encoded),
            column_finish='%s;' % (encoded)))
    except pycassa.NotFoundException:
        raise NoIdError('%s %s' % (obj_type, fq_name))
    if not matches:
        raise NoIdError('%s %s' % (obj_type, fq_name))
    # first matching column wins; uuid is the last ':'-separated field
    first_col_name = matches[0][0]
    return first_col_name.split(':')[-1]
def read_vnc_obj(cls, uuid=None, fq_name=None, obj_type=None, fields=None):
    """Read an object by uuid or fq_name and return it as a vnc object.

    At least one of *uuid* / *fq_name* must be given; fq_name may be a
    ':'-joined string or a list of components.  Raises NoIdError when
    neither identifier is supplied or the lookup fails downstream.
    """
    if uuid is None and fq_name is None:
        raise NoIdError('')
    read_type = obj_type or cls.obj_type
    if uuid is None:
        # accept 'a:b:c' string form as well as a component list
        name = fq_name.split(':') if isinstance(fq_name, basestring) \
            else fq_name
        uuid = cls._object_db.fq_name_to_uuid(read_type, name)
    obj_dict = cls.read_obj(uuid, read_type, fields)
    obj = cls.vnc_obj_from_dict(read_type, obj_dict)
    obj.clear_pending_updates()
    return obj
def uuid_to_fq_name(self, id):
    """Map uuid -> fq_name list, consulting the in-memory cache first.

    On a cache miss the uuid row is read from cassandra and the cache
    is populated.  Raises NoIdError when the uuid row does not exist.
    """
    if id in self._cache_uuid_to_fq_name:
        return self._cache_uuid_to_fq_name[id]
    try:
        row = self._obj_uuid_cf.get(id, columns=['fq_name'])
    except pycassa.NotFoundException:
        raise NoIdError(id)
    fq_name = json.loads(row['fq_name'])
    self.cache_uuid_to_fq_name_add(id, fq_name)
    return fq_name
def uuid_to_obj_type(self, id):
    """Map uuid -> object type, consulting the fq-name cache first.

    The cache value is indexed with [1] for the type (fq_name at [0] —
    presumably; verify against cache_uuid_to_fq_name_add).  Cache misses
    fall back to the uuid row; raises NoIdError when the row is absent.
    """
    try:
        # KeyError (missing id) drops us through to the cassandra read
        return self._cache_uuid_to_fq_name[id][1]
    except KeyError:
        pass
    try:
        cols = self._obj_uuid_cf.get(id, columns=['fq_name', 'type'])
    except pycassa.NotFoundException:
        raise NoIdError(id)
    fq_name = json.loads(cols['fq_name'])
    obj_type = json.loads(cols['type'])
    self.cache_uuid_to_fq_name_add(id, fq_name, obj_type)
    return obj_type
def object_delete(self, res_type, obj_uuid):
    """Delete an object: its uuid row, parent/ref links, fq-name entry.

    Returns (True, '') on success; raises NoIdError when the uuid row
    does not exist.  Link removals and the row delete go through one
    batch mutation; the fq-name index is cleaned up afterwards.
    """
    obj_type = res_type.replace('-', '_')
    obj_class = self._get_resource_class(obj_type)
    uuid_cf = self._obj_uuid_cf

    try:
        row = uuid_cf.get(obj_uuid, columns=['fq_name'])
    except pycassa.NotFoundException:
        raise NoIdError(obj_uuid)
    fq_name = json.loads(row['fq_name'])

    bch = uuid_cf.batch()

    # unlink from parent: columns are 'parent:<type>:<uuid>'
    for col_name, _ in uuid_cf.xget(obj_uuid, column_start='parent:',
                                    column_finish='parent;'):
        _, parent_type, parent_uuid = col_name.split(':')
        self._delete_child(bch, parent_type, parent_uuid,
                           obj_type, obj_uuid)

    # remove outgoing refs: columns are 'ref:<type>:<uuid>'
    for col_name, _ in uuid_cf.xget(obj_uuid, column_start='ref:',
                                    column_finish='ref;'):
        _, ref_type, ref_uuid = col_name.split(':')
        self._delete_ref(bch, obj_type, obj_uuid, ref_type, ref_uuid)

    bch.remove(obj_uuid)
    bch.send()

    # update fqname table: column is '<encoded-fq-name>:<uuid>'
    fq_name_col = utils.encode_string(':'.join(fq_name)) + ':' + obj_uuid
    self._obj_fq_name_cf.remove(obj_type, columns=[fq_name_col])

    return (True, '')
def uuid_to_obj_type(self, id):
    """Return the object type stored in the uuid row for *id*.

    Raises NoIdError when the uuid row does not exist.
    """
    try:
        cols = self._obj_uuid_cf.get(id, columns=['type'])
    except pycassa.NotFoundException:
        raise NoIdError(id)
    return json.loads(cols['type'])
def _object_read(self, res_type, obj_uuids, field_names=None):
    """Read one or more objects from the uuid column family.

    Returns (True, [result_dict, ...]) on success, or (False, err_msg)
    when a parent uuid cannot be resolved.  Each result dict carries
    'uuid', 'fq_name', parent info, properties, children, refs and
    back-refs, dispatched on the column-name prefix.  Raises NoIdError
    when a single requested uuid has no row at all.
    """
    # if field_names=None, all fields will be read/returned
    obj_type = res_type.replace('-', '_')
    obj_class = self._get_resource_class(obj_type)
    obj_uuid_cf = self._obj_uuid_cf

    # optimize for common case of reading non-backref, non-children fields
    # ignoring columns starting from 'b' and 'c' - significant performance
    # impact in scaled setting. e.g. read of project
    if (field_names is None or
            (set(field_names) & (obj_class.backref_fields |
                                 obj_class.children_fields))):
        # atleast one backref/children field is needed
        obj_rows = obj_uuid_cf.multiget(obj_uuids,
                                        column_count=self._MAX_COL,
                                        include_timestamp=True)
    else:
        # ignore reading backref + children columns
        # (column_start='d' skips 'backref:'/'children:' prefixed columns)
        obj_rows = obj_uuid_cf.multiget(obj_uuids,
                                        column_start='d',
                                        column_count=self._MAX_COL,
                                        include_timestamp=True)

    # only a single-uuid miss is an error; multi-uuid reads return
    # whatever rows exist
    if (len(obj_uuids) == 1) and not obj_rows:
        raise NoIdError(obj_uuids[0])

    results = []
    for row_key in obj_rows:
        obj_uuid = row_key
        # obj_cols maps column name -> (json_value, timestamp) because
        # include_timestamp=True above
        obj_cols = obj_rows[obj_uuid]
        result = {}
        result['uuid'] = obj_uuid
        result['fq_name'] = json.loads(obj_cols['fq_name'][0])
        # dispatch each column on its name prefix; the prefixes are
        # distinct so at most one branch matches per column
        for col_name in obj_cols.keys():
            if self._re_match_parent.match(col_name):
                # non config-root child; column is 'parent:<type>:<uuid>'
                (_, _, parent_uuid) = col_name.split(':')
                parent_type = json.loads(obj_cols['parent_type'][0])
                result['parent_type'] = parent_type
                try:
                    result['parent_uuid'] = parent_uuid
                    result['parent_href'] = self._generate_url(parent_type,
                                                               parent_uuid)
                except NoIdError:
                    # NOTE(review): aborts the whole multiget with an
                    # error tuple instead of skipping this one row
                    err_msg = 'Unknown uuid for parent ' + \
                        result['fq_name'][-2]
                    return (False, err_msg)
            if self._re_match_prop.match(col_name):
                # property column 'prop:<name>'
                (_, prop_name) = col_name.split(':')
                result[prop_name] = json.loads(obj_cols[col_name][0])
            if self._re_match_children.match(col_name):
                # child column 'children:<type>:<uuid>'; honored only if
                # the caller asked for the plural '<type>s' field
                (_, child_type, child_uuid) = col_name.split(':')
                if field_names and '%ss' %(child_type) not in field_names:
                    continue
                child_tstamp = obj_cols[col_name][1]
                try:
                    self._read_child(result, obj_uuid, child_type,
                                     child_uuid, child_tstamp)
                except NoIdError:
                    # stale child entry - skip it
                    continue
            if self._re_match_ref.match(col_name):
                # ref column 'ref:<type>:<uuid>'
                (_, ref_type, ref_uuid) = col_name.split(':')
                self._read_ref(result, obj_uuid, ref_type, ref_uuid,
                               obj_cols[col_name][0])
            if self._re_match_backref.match(col_name):
                # back-ref column; honored only if '<type>_back_refs'
                # was requested
                (_, back_ref_type, back_ref_uuid) = col_name.split(':')
                if field_names and '%s_back_refs' %(back_ref_type) \
                        not in field_names:
                    continue
                try:
                    self._read_back_ref(result, obj_uuid, back_ref_type,
                                        back_ref_uuid,
                                        obj_cols[col_name][0])
                except NoIdError:
                    # stale back-ref entry - skip it
                    continue
        # for all column names

        # sort children by creation time
        for child_field in obj_class.children_fields:
            if child_field not in result:
                continue
            sorted_children = sorted(result[child_field],
                                     key = itemgetter('tstamp'))
            # re-write result's children without timestamp
            result[child_field] = sorted_children
            [child.pop('tstamp') for child in result[child_field]]
        # for all children
        results.append(result)
    # end for all rows

    return (True, results)