def add_system():
    """
    Flask endpoint: create a new system owned by the current user.

    Accepts either a JSON body or form-encoded parameters; both require a
    'fqdn' value.

    :raises BadRequest400: if the fqdn key/parameter is missing.
    :raises UnsupportedMediaType415: if the request is neither JSON nor form.
    :raises Conflict409: if a system with that FQDN already exists.
    """
    # We accept JSON or form-encoded for convenience
    if request.json:
        if 'fqdn' not in request.json:
            raise BadRequest400('Missing fqdn key')
        new_fqdn = request.json['fqdn']
    elif request.form:
        if 'fqdn' not in request.form:
            raise BadRequest400('Missing fqdn parameter')
        new_fqdn = request.form['fqdn']
    else:
        raise UnsupportedMediaType415
    with convert_internal_errors():
        if System.query.filter(System.fqdn == new_fqdn).count() != 0:
            raise Conflict409('System with fqdn %r already exists' % new_fqdn)
        system = System(fqdn=new_fqdn, owner=identity.current.user)
        session.add(system)
        # new systems are visible to everybody by default
        system.custom_access_policy = SystemAccessPolicy()
        system.custom_access_policy.add_rule(SystemPermission.view,
                everybody=True)
    # XXX this should be 201 with Location: /systems/FQDN/ but 302 is more
    # convenient because it lets us use a traditional browser form without AJAX
    # handling, and for now we're redirecting to /view/FQDN until that is moved
    # to /systems/FQDN/
    return flask_redirect(url(u'/view/%s#essentials' % system.fqdn))
def find_user_or_create(user_name):
    """
    Return the existing User with *user_name*, or create a new one (added to
    the session but not flushed) if none exists.
    """
    user = User.by_user_name(user_name)
    if user is None:
        # User(user_name=...) already sets the attribute; the original
        # redundantly re-assigned user.user_name afterwards.
        user = User(user_name=user_name)
        session.add(user)
    return user
def _handle_historical(session, data, myth_uuid):
    """
    Replace the stored MythTV "historical" stats row for *myth_uuid* with
    the values in data['features']['historical']; missing values default
    to 0.
    """
    try:
        myth_hist = data['features']['historical']
    except (KeyError, TypeError):
        # Feature block absent or data malformed: fall back to defaults.
        myth_hist = {}
    if not isinstance(myth_hist, dict):
        # Preserve the original's tolerance of a non-dict payload.
        myth_hist = {}
    # Delete any previous row for this uuid before inserting the new one.
    session.query(mythtvHistorical).filter_by(myth_uuid=myth_uuid).delete()
    # dict.get with defaults replaces the original's four bare try/except
    # blocks, which silently swallowed every exception type.
    showcount = myth_hist.get('showcount', 0)
    rectime = myth_hist.get('rectime', 0)
    db_age = myth_hist.get('db_age', 0)
    reccount = myth_hist.get('reccount', 0)
    session.add(mythtvHistorical(myth_uuid, showcount, rectime,
                                 db_age, reccount))
    session.flush()
def test_system_details_includes_cpus(self):
    """The system details JSON must expose every stored CPU attribute."""
    with session.begin():
        cpu = Cpu(cores=5, family=6, model=7, model_name='Intel',
                  flags=['beer', 'frob'], processors=6, sockets=2,
                  speed=24, stepping=2, vendor='Transmeta')
        session.add(cpu)
        self.system.cpu = cpu
    response = requests.get(
        get_server_base() + 'systems/%s' % self.system.fqdn)
    json = response.json()
    # Table-driven check of each serialized CPU field against what we
    # stored above.
    expected_fields = [
        ('cpu_flags', [u'beer', u'frob']),
        ('cpu_cores', 5),
        ('cpu_family', 6),
        ('cpu_model', 7),
        ('cpu_model_name', u'Intel'),
        ('cpu_hyper', True),
        ('cpu_processors', 6),
        ('cpu_sockets', 2),
        ('cpu_speed', 24),
        ('cpu_stepping', 2),
        ('cpu_vendor', 'Transmeta'),
    ]
    for field, value in expected_fields:
        self.assertEqual(value, json[field])
def remove(self, **kw):
    """
    Web UI handler: delete a group.

    Refuses when the group lookup fails, the caller is not an owner, the
    group is predefined, or the group has associated jobs; each failure
    flashes a message and redirects back to the caller's groups page.
    """
    u = identity.current.user
    try:
        group = Group.by_id(kw['group_id'])
    except DatabaseLookupError:
        flash(unicode('Invalid group or already removed'))
        redirect('../groups/mine')
    if not group.can_edit(u):
        flash(_(u'You are not an owner of group %s' % group))
        redirect('../groups/mine')
    if group.is_protected_group():
        flash(_(u'This group %s is predefined and cannot be deleted' % group))
        redirect('../groups/mine')
    if group.jobs:
        flash(_(u'Cannot delete a group which has associated jobs'))
        redirect('../groups/mine')
    # Record the access policy rules that will be removed
    # before deleting the group
    for rule in group.system_access_policy_rules:
        rule.record_deletion()
    # For any system pool owned by this group, unset owning_group
    # and set owning_user to the user deleting this group
    pools = SystemPool.query.filter_by(owning_group_id=group.group_id)
    for pool in pools:
        pool.change_owner(user=u, service='WEBUI')
    session.delete(group)
    # Global activity entry records who removed the group.
    activity = Activity(u, u'WEBUI', u'Removed', u'Group',
            group.display_name, u"")
    session.add(activity)
    flash( _(u"%s deleted") % group.display_name )
    raise redirect(".")
def by_user_name(cls, user_name): """ A class method that permits to search users based on their user_name attribute. """ # Try to look up the user via local DB first. user = cls.query.filter_by(user_name=user_name).first() # If user doesn't exist in DB check ldap if enabled. ldapenabled = get('identity.ldap.enabled', False) autocreate = get('identity.soldapprovider.autocreate', False) # Presence of '/' indicates a Kerberos service principal. if not user and ldapenabled and autocreate and '/' not in user_name: filter = ldap.filter.filter_format('(uid=%s)', [user_name.encode('utf8')]) ldapcon = ldap.initialize(get('identity.soldapprovider.uri')) objects = ldapcon.search_st(get('identity.soldapprovider.basedn', ''), ldap.SCOPE_SUBTREE, filter, timeout=get('identity.soldapprovider.timeout', 20)) # no match if(len(objects) == 0): return None # need exact match elif(len(objects) > 1): return None attrs = objects[0][1] # LDAP normalization rules means that we might have found a user # who doesn't actually match the username we were given. if attrs['uid'][0].decode('utf8') != user_name: return None user = User() user.user_name = attrs['uid'][0].decode('utf8') user.display_name = attrs['cn'][0].decode('utf8') user.email_address = attrs['mail'][0].decode('utf8') session.add(user) session.flush() return user
def remove(self, **kw):
    """Clear a ConfigItem's value and flash a confirmation message."""
    config_item = ConfigItem.by_id(kw['id'])
    # Setting the value to None "clears" the item; record who did it.
    config_item.set(None, None, identity.current.user)
    session.add(config_item)
    session.flush()
    flash(_(u"%s cleared") % config_item.description)
    raise redirect(".")
def delete_group(group_name):
    """
    Deletes a group.

    :status 204: Group was successfully deleted.
    :status 400: Group cannot be deleted because it is a predefined group,
      or because it has associated jobs.
    """
    group = _get_group_by_name(group_name)
    if not group.can_edit(identity.current.user):
        raise Forbidden403('Cannot edit group')
    if group.is_protected_group():
        raise BadRequest400("Group '%s' is predefined and cannot be deleted"
                % group.group_name)
    if group.jobs:
        raise BadRequest400('Cannot delete a group which has associated jobs')
    # Record the access policy rules that will be removed
    for rule in group.system_access_policy_rules:
        rule.record_deletion()
    # For any system pool owned by this group, unset owning_group
    # and set owning_user to the user deleting this group
    pools = SystemPool.query.filter_by(owning_group=group)
    for pool in pools:
        pool.change_owner(user=identity.current.user, service=u'HTTP')
    session.delete(group)
    # Global activity entry records who removed the group.
    activity = Activity(identity.current.user, u'HTTP', u'Removed', u'Group',
            group.display_name)
    session.add(activity)
    return '', 204
def remove(self, id, *args, **kw):
    """
    Web UI handler: soft-remove a lab controller.

    Marks it removed and disabled, detaches all of its systems (recording
    a per-system activity entry), cancels its active watchdogs, and drops
    its distro tree associations. The row itself is kept for history.
    """
    labcontroller = LabController.by_id(id)
    labcontroller.removed = datetime.utcnow()
    systems = System.query.filter_by(lab_controller_id=id).values(System.id)
    # Record the detachment on every affected system before the bulk update.
    for system_id in systems:
        sys_activity = SystemActivity(identity.current.user, 'WEBUI',
                'Changed', 'lab_controller', labcontroller.fqdn, None,
                system_id=system_id[0])
        session.add(sys_activity)
    # Bulk-detach all systems from this lab controller in a single UPDATE.
    System.__table__.update().where(System.lab_controller_id == id).\
        values(lab_controller_id=None).execute()
    # Cancel any jobs still being monitored on this lab controller.
    watchdogs = Watchdog.by_status(labcontroller=labcontroller,
            status='active')
    for w in watchdogs:
        w.recipe.recipeset.job.cancel(
                msg='LabController %s has been deleted' % labcontroller.fqdn)
    # Remove distro tree associations, logging each one on the tree.
    for lca in labcontroller._distro_trees:
        lca.distro_tree.activity.append(DistroTreeActivity(
                user=identity.current.user, service=u'WEBUI',
                action=u'Removed', field_name=u'lab_controller_assocs',
                old_value=u'%s %s' % (lca.lab_controller, lca.url),
                new_value=None))
        session.delete(lca)
    labcontroller.disabled = True
    labcontroller.record_activity(user=identity.current.user,
            service=u'WEBUI', field=u'Disabled', action=u'Changed',
            old=unicode(False), new=unicode(True))
    labcontroller.record_activity(user=identity.current.user,
            service=u'WEBUI', field=u'Removed', action=u'Changed',
            old=unicode(False), new=unicode(True))
    flash( _(u"%s removed") % labcontroller.fqdn )
    raise redirect(".")
def create_distro_tree(distro=None, distro_name=None,
                       osmajor=u'DansAwesomeLinux6', distro_tags=None,
                       arch=u'i386', variant=u'Server', lab_controllers=None,
                       urls=None):
    """
    Test-data helper: create (or reuse) a distro tree and register it in
    the given lab controllers (all known lab controllers by default).

    :param urls: explicit URLs to register; when None, nfs/http/ftp URLs
        are synthesised from each lab controller's FQDN.
    """
    if distro is None:
        if distro_name is None:
            distro = create_distro(osmajor=osmajor, tags=distro_tags)
        else:
            distro = Distro.by_name(distro_name)
            if not distro:
                distro = create_distro(name=distro_name)
    distro_tree = DistroTree.lazy_create(distro=distro,
            arch=Arch.by_name(arch), variant=variant)
    session.add(distro_tree)
    if distro_tree.arch not in distro.osversion.arches:
        distro.osversion.arches.append(distro_tree.arch)
    distro_tree.repos.append(DistroTreeRepo(repo_id=variant,
            repo_type=u'variant', path=u''))
    existing_urls = [lc_distro_tree.url for lc_distro_tree
            in distro_tree.lab_controller_assocs]
    # make it available in all lab controllers
    for lc in (lab_controllers or LabController.query):
        # ':' is only needed after the host for nfs-style URLs.
        default_urls = [u'%s://%s%s/distros/%s/%s/%s/os/' % (scheme, lc.fqdn,
                scheme == 'nfs' and ':' or '', distro_tree.distro.name,
                distro_tree.variant, distro_tree.arch.arch)
                for scheme in ['nfs', 'http', 'ftp']]
        for url in (urls or default_urls):
            if url in existing_urls:
                # NOTE(review): 'break' abandons the *remaining* urls for
                # this lab controller as soon as one duplicate is seen;
                # 'continue' may have been intended -- verify before
                # changing.
                break
            lab_controller_distro_tree = LabControllerDistroTree(
                lab_controller=lc, url=url)
            distro_tree.lab_controller_assocs.append(lab_controller_distro_tree)
    log.debug('Created distro tree %r', distro_tree)
    return distro_tree
def _create_labcontroller_helper(data):
    """
    Create a new LabController from *data* (requires 'fqdn' and 'user_name';
    'email_address' and 'password' are optional) and return a 201 JSON
    response describing it.

    :raises Conflict409: if a lab controller with that FQDN already exists.
    """
    with convert_internal_errors():
        if LabController.query.filter_by(fqdn=data['fqdn']).count():
            raise Conflict409('Lab Controller %s already exists' % data['fqdn'])
        user = find_user_or_create(data['user_name'])
        # Keep existing email/password when the request doesn't supply them.
        user = update_user(
            user=user,
            display_name=data['fqdn'],
            email_address=data.get('email_address', user.email_address),
            password=data.get('password', user.password)
        )
        labcontroller = LabController(fqdn=data['fqdn'], disabled=False)
        labcontroller.record_activity(
            user=identity.current.user, service=u'HTTP',
            action=u'Changed', field=u'FQDN', old=u'', new=data['fqdn'])
        labcontroller.user = user
        labcontroller.record_activity(
            user=identity.current.user, service=u'HTTP',
            action=u'Changed', field=u'User', old=u'', new=user.user_name)
        # For backwards compatibility
        labcontroller.record_activity(
            user=identity.current.user, service=u'HTTP',
            action=u'Changed', field=u'Disabled', old=u'',
            new=unicode(labcontroller.disabled))
        session.add(labcontroller)
        # flush it so we return an id, otherwise we'll end up back in here
        # from the edit form
        session.flush()
    response = jsonify(labcontroller.__json__())
    response.status_code = 201
    return response
def genkey(self):
    """
    Generate (or regenerate) a Yubikey key record for the current user.

    Creates a Ykksm row keyed on the person's id; if one already exists
    (IntegrityError on flush), the old Ykksm row and any associated Ykval
    counter row are deleted and a fresh key is stored instead.

    :returns: dict(key=...) with "publicname internalname aeskey".
    """
    username = turbogears.identity.current.user_name
    person = People.by_username(username)
    created = time.strftime("%Y-%m-%dT%H:%M:%S")
    hexctr = "%012x" % person.id
    publicname = hex2modhex(hexctr)
    internalname = gethexrand(12)
    aeskey = gethexrand(32)
    lockcode = gethexrand(12)
    try:
        new_ykksm = Ykksm(serialnr=person.id, publicname=publicname,
                created=created, internalname=internalname, aeskey=aeskey,
                lockcode=lockcode, creator=username)
        session.add(new_ykksm)
        session.flush()
    except IntegrityError:
        # A key already exists for this serial number: replace it.
        session.rollback()
        old_ykksm = session.query(Ykksm).filter_by(serialnr=person.id).all()[0]
        session.delete(old_ykksm)
        new_ykksm = Ykksm(serialnr=person.id, publicname=publicname,
                created=created, internalname=internalname, aeskey=aeskey,
                lockcode=lockcode, creator=username)
        # BUG FIX: the replacement row was never added to the session
        # (the original only did "old_ykksm = new_ykksm"), so the flush
        # below had nothing to persist.
        session.add(new_ykksm)
        session.flush()
    try:
        old_ykval = session.query(Ykval).filter_by(
                yk_publicname=publicname).all()[0]
        session.delete(old_ykval)
        session.flush()
    except IndexError:
        # No old record? Maybe they never used their key
        pass
    string = "%s %s %s" % (publicname, internalname, aeskey)
    return dict(key=string)
def add_json_plus_pub_uuid(self, uuid, pub_uuid, host, token, smolt_protocol):
    """Validate and record a submission that carries a public UUID."""
    self._run_add_json_checks(uuid, host, token, smolt_protocol)
    result = self.handle_submission(uuid, pub_uuid, host)
    # Log the batch entry so the import is auditable.
    session.add(BatchJob(host, uuid, added=True))
    session.flush()
    return result
def from_csv(cls, user, data, log):
    """
    Import data from CSV file into user.groups
    """
    # Guard clause: a group name is mandatory.
    if not ('group' in data and data['group']):
        log.append("%s: group can't be empty!" % user)
        return False
    try:
        group = Group.by_name(data['group'])
    except InvalidRequestError:
        # Unknown group: create it on the fly.
        group = Group(group_name=data['group'], display_name=data['group'])
        session.add(group)
    deleted = smart_bool(data['deleted']) if 'deleted' in data else False
    if deleted:
        if group in user.groups:
            group.remove_member(user, service=u'CSV',
                    agent=identity.current.user)
    elif group not in user.groups:
        group.add_member(user, service=u'CSV', agent=identity.current.user)
    return True
def _handle_pbp(session, data, machine_id):
    """
    Replace the stored MythTV playback-profile row for *machine_id* with
    the values from data['features']['playbackprofile'] (defaults: name
    "unknown", profiles ['unknown']).
    """
    try:
        myth_pb = data['features']['playbackprofile']
    except (KeyError, TypeError):
        # Feature block absent or data malformed: fall back to defaults.
        myth_pb = {}
    if not isinstance(myth_pb, dict):
        # Preserve the original's tolerance of a non-dict payload.
        myth_pb = {}
    # dict.get with defaults replaces the original's bare try/except blocks.
    name = myth_pb.get('name', "unknown")
    profiles = myth_pb.get('profiles', ['unknown'])
    #Remove old entry
    session.query(mythtvPbp).filter_by(machine_id=machine_id).delete()
    #Add new entry
    session.add(mythtvPbp(machine_id, name, profiles))
    session.flush()
def create_default_user(user_name, password=None): """Create a default user.""" try: u = User.by_user_name(user_name) except: u = None if u: print "User '%s' already exists in database." % user_name return from getpass import getpass from sys import stdin while password is None: try: password = getpass("Enter password for user '%s': " % user_name.encode(stdin.encoding)).strip() password2 = getpass("Confirm password: "******"Passwords do not match." else: password = password.decode(stdin.encoding) break except (EOFError, KeyboardInterrupt): print "User creation cancelled." return u = User() u.user_name = user_name u.display_name = u"Default User" u.email_address = u"*****@*****.**" % user_name u.password = password session.add(u) session.flush() print "User '%s' created." % user_name
def create_system_activity(user=None, **kw):
    """Test-data helper: create and persist a 'Loaned To' SystemActivity."""
    user = user or create_user()
    record = SystemActivity(user, u'WEBUI', u'Changed', u'Loaned To',
                            unique_name(u'random_%s'), user.user_name)
    session.add(record)
    return record
def _handle_pbp(session, data, machine_id):
    """
    Replace the stored MythTV playback-profile row for *machine_id* with
    the values from data['features']['playbackprofile'] (defaults: name
    "unknown", profiles ['unknown']).
    """
    try:
        myth_pb = data['features']['playbackprofile']
    except (KeyError, TypeError):
        # Feature block absent or data malformed: fall back to defaults.
        myth_pb = {}
    if not isinstance(myth_pb, dict):
        # Preserve the original's tolerance of a non-dict payload.
        myth_pb = {}
    # dict.get with defaults replaces the original's bare try/except blocks.
    name = myth_pb.get('name', "unknown")
    profiles = myth_pb.get('profiles', ['unknown'])
    #Remove old entry
    session.query(mythtvPbp).filter_by(machine_id=machine_id).delete()
    #Add new entry
    session.add(mythtvPbp(machine_id, name, profiles))
    session.flush()
def _from_csv(cls, system, data, csv_type, log):
    """
    Import data from CSV file into system.groups
    """
    if 'group' in data and data['group']:
        try:
            group = Group.by_name(data['group'])
        except InvalidRequestError:
            # Unknown group: create it on the fly and flush so it has an id.
            group = Group(group_name=data['group'],
                    display_name=data['group'])
            session.add(group)
            session.flush([group])
        deleted = False
        if 'deleted' in data:
            deleted = smart_bool(data['deleted'])
        if deleted:
            if group in system.groups:
                # Log the removal on the system's activity feed.
                activity = SystemActivity(identity.current.user, 'CSV',
                        'Removed', 'group', '%s' % group, '')
                system.activity.append(activity)
                system.groups.remove(group)
        else:
            if group not in system.groups:
                system.groups.append(group)
                activity = SystemActivity(identity.current.user, 'CSV',
                        'Added', 'group', '', '%s' % group)
                system.activity.append(activity)
    else:
        log.append("%s: group can't be empty!" % system.fqdn)
        return False
    return True
def create_job_for_recipesets(recipesets, owner=None, whiteboard=None,
                              cc=None, product=None, retention_tag=None,
                              group=None, submitter=None, **kwargs):
    """
    Test-data helper: build and persist a Job containing the given recipe
    sets, filling in defaults for any attribute not supplied.
    """
    if retention_tag is None:
        retention_tag = RetentionTag.by_tag(
            u'scratch')  # Don't use default, unpredictable
    else:
        retention_tag = RetentionTag.by_tag(retention_tag)
    if owner is None:
        owner = create_user()
    if whiteboard is None:
        whiteboard = unique_name(u'job %s')
    # ttasks is the total task count across all recipe sets.
    job = Job(whiteboard=whiteboard,
              ttasks=sum(rs.ttasks for rs in recipesets),
              owner=owner, retention_tag=retention_tag, group=group,
              product=product, submitter=submitter)
    if cc is not None:
        job.cc = cc
    job.recipesets.extend(recipesets)
    session.add(job)
    session.flush()
    log.debug('Created %s', job.t_id)
    return job
def save(self, id=None, **kw):
    """Persist a new retention tag built from the submitted form fields."""
    session.add(Tag(tag=kw['tag'],
                    default=kw['default'],
                    needs_product=kw['needs_product'],
                    expire_in_days=kw['expire_in_days']))
    flash(_(u"OK"))
    redirect("./admin")
def from_csv(cls, user, data, log):
    """
    Import data from CSV file into user.groups
    """
    # Guard clause: a group name is mandatory.
    if not ('group' in data and data['group']):
        log.append("%s: group can't be empty!" % user)
        return False
    try:
        group = Group.by_name(data['group'])
    except InvalidRequestError:
        # Unknown group: create it on the fly.
        group = Group(group_name=data['group'], display_name=data['group'])
        session.add(group)
    deleted = smart_bool(data['deleted']) if 'deleted' in data else False
    if deleted:
        if group in user.groups:
            group.remove_member(user, service=u'CSV',
                    agent=identity.current.user)
    elif group not in user.groups:
        group.add_member(user, service=u'CSV', agent=identity.current.user)
    return True
def _new_group(self, group_id, display_name, group_name, ldap, root_password):
    """
    Create and return a new Group with the given attributes.

    Only admins may create LDAP-backed groups. For non-LDAP groups the
    creating user becomes the first member and owner; LDAP groups have
    their membership refreshed from the directory and no owners.
    """
    user = identity.current.user
    if ldap and not user.is_admin():
        flash(_(u'Only admins can create LDAP groups'))
        redirect('.')
    # A pre-existing group with the same name is an error.
    try:
        Group.by_name(group_name)
    except NoResultFound:
        pass
    else:
        flash( _(u"Group %s already exists." % group_name) )
        redirect(".")
    group = Group()
    session.add(group)
    activity = Activity(user, u'WEBUI', u'Added', u'Group', u"", display_name)
    group.display_name = display_name
    group.group_name = group_name
    group.ldap = ldap
    if group.ldap:
        group.refresh_ldap_members()
    group.root_password = root_password
    if not ldap:
        # LDAP groups don't have owners
        group.user_group_assocs.append(UserGroup(user=user, is_owner=True))
        group.activity.append(GroupActivity(user, service=u'WEBUI',
                action=u'Added', field_name=u'User', old_value=None,
                new_value=user.user_name))
        group.activity.append(GroupActivity(user, service=u'WEBUI',
                action=u'Added', field_name=u'Owner', old_value=None,
                new_value=user.user_name))
    return group
def from_csv(cls, user, data, log):
    """
    Import data from CSV file into user.groups
    """
    # Guard clause: a group name is mandatory.
    if not data.get('group'):
        log.append("%s: group can't be empty!" % user)
        return False
    try:
        group = Group.by_name(data['group'])
    except InvalidRequestError:
        # Unknown group: create it on the fly.
        group = Group(group_name=data['group'], display_name=data['group'])
        session.add(group)
    deleted = smart_bool(data['deleted']) if 'deleted' in data else False
    if deleted:
        if group in user.groups:
            # Record the removal before mutating the membership list.
            group.record_activity(user=identity.current.user, service=u'CSV',
                    field=u'User', action=u'Removed', old=user)
            user.groups.remove(group)
    elif group not in user.groups:
        group.record_activity(user=identity.current.user, service=u'CSV',
                field=u'User', action=u'Added', new=user)
        user.groups.append(group)
    return True
def init_db(metadata):
    """
    Create all tables in an empty database and stamp it as fully migrated
    (both Alembic schema revision and Beaker data migrations).
    """
    logger.info('Creating tables in empty database')
    if metadata != tg_metadata:
        # Copy table definitions onto the caller's metadata so create_all
        # builds the full schema against their bind.
        metadata.tables = tg_metadata.tables.copy()
    metadata.create_all()
    logger.info('Stamping database with Alembic "head" revision')
    def stamp(rev, context):
        # Stamp without running migrations; a freshly created schema is
        # already at "head".
        try:
            return context.script._stamp_revs('head', rev)
        except AttributeError:
            # alembic < 0.7
            current = context._current_rev()
            head = context.script.get_revision('head')
            context._update_current_rev(current, head.revision)
            return []
    run_alembic_operation(metadata, stamp)
    # Also mark all data migrations as done, because there is no data to
    # migrate. This avoids beakerd wasting time trying to run them all when it
    # first starts up.
    session = create_session(bind=metadata.bind)
    with session.begin():
        for migration_name in DataMigration.all_names():
            logger.info('Marking data migration %s finished', migration_name)
            session.add(DataMigration(name=migration_name,
                    finish_time=datetime.datetime.utcnow()))
def _handle_historical(session, data, myth_uuid):
    """
    Replace the stored MythTV "historical" stats row for *myth_uuid* with
    the values in data['features']['historical']; missing values default
    to 0.
    """
    try:
        myth_hist = data['features']['historical']
    except (KeyError, TypeError):
        # Feature block absent or data malformed: fall back to defaults.
        myth_hist = {}
    if not isinstance(myth_hist, dict):
        # Preserve the original's tolerance of a non-dict payload.
        myth_hist = {}
    # Delete any previous row for this uuid before inserting the new one.
    session.query(mythtvHistorical).filter_by(myth_uuid=myth_uuid).delete()
    # dict.get with defaults replaces the original's four bare try/except
    # blocks, which silently swallowed every exception type.
    showcount = myth_hist.get('showcount', 0)
    rectime = myth_hist.get('rectime', 0)
    db_age = myth_hist.get('db_age', 0)
    reccount = myth_hist.get('reccount', 0)
    session.add(mythtvHistorical(myth_uuid, showcount, rectime,
                                 db_age, reccount))
    session.flush()
def delete_pool(pool_name):
    """
    Deletes a system pool

    :param pool_name: System pool's name
    """
    # Row lock prevents concurrent edits while we tear the pool down.
    pool = _get_pool_by_name(pool_name, lockmode='update')
    u = identity.current.user
    if not pool.can_edit(u):
        raise Forbidden403('Cannot delete pool %s' % pool_name)
    systems = System.query.filter(System.pools.contains(pool))
    System.record_bulk_activity(systems, user=identity.current.user,
            service=u'HTTP', action=u'Removed', field=u'Pool',
            old=unicode(pool), new=None)
    # Since we are deleting the pool, we will have to change the active
    # access policy for all systems using the pool's policy to their
    # custom policy
    systems = System.query.filter(
            System.active_access_policy == pool.access_policy)
    for system in systems:
        system.active_access_policy = system.custom_access_policy
    System.record_bulk_activity(systems, user=identity.current.user,
            service=u'HTTP', field=u'Active Access Policy',
            action=u'Changed',
            old = 'Pool policy: %s' % pool_name,
            new = 'Custom access policy')
    session.delete(pool)
    # Global activity entry records who deleted the pool.
    activity = Activity(u, u'HTTP', u'Deleted', u'Pool', pool_name)
    session.add(activity)
    return '', 204
def create_pool():
    """
    Creates a new system pool in Beaker. The request must be
    :mimetype:`application/x-www-form-urlencoded` or
    :mimetype:`application/json`.

    :jsonparam string name: Name for the system pool.
    :jsonparam string description: Description of the system pool.
    :jsonparam object owner: JSON object containing a ``user_name`` key or
        ``group_name`` key identifying the owner for the system pool.

    :status 201: The system pool was successfully created.
    """
    owner = None
    description = None
    u = identity.current.user
    # Accept either JSON or form-encoded input.
    if request.json:
        if 'name' not in request.json:
            raise BadRequest400('Missing pool name key')
        new_name = request.json['name']
        if 'owner' in request.json:
            owner = request.json['owner']
        if 'description' in request.json:
            description = request.json['description']
    elif request.form:
        if 'name' not in request.form:
            raise BadRequest400('Missing pool name parameter')
        new_name = request.form['name']
        if 'owner' in request.form:
            owner = request.form['owner']
        if 'description' in request.form:
            description = request.form['description']
    else:
        raise UnsupportedMediaType415
    with convert_internal_errors():
        if SystemPool.query.filter(SystemPool.name == new_name).count() != 0:
            raise Conflict409('System pool with name %r already exists'
                    % new_name)
        pool = SystemPool(name=new_name, description=description)
        session.add(pool)
        # Owner defaults to the creating user when none was supplied.
        if owner:
            owner, owner_type = _get_owner(owner)
            if owner_type == 'user':
                pool.owning_user = owner
            else:
                pool.owning_group = owner
        else:
            pool.owning_user = u
        # new systems pool are visible to everybody by default
        pool.access_policy = SystemAccessPolicy()
        pool.access_policy.add_rule(SystemPermission.view, everybody=True)
        pool.record_activity(user=u, service=u'HTTP',
                action=u'Created', field=u'Pool', new=unicode(pool))
    response = jsonify(pool.__json__())
    response.status_code = 201
    response.headers.add('Location', absolute_url(pool.href))
    return response
def test_system_details_includes_cpus(self):
    """The system details JSON must expose every stored CPU attribute."""
    with session.begin():
        cpu = Cpu(cores=5, family=6, model=7, model_name='Intel',
                  flags=['beer', 'frob'], processors=6, sockets=2,
                  speed=24, stepping=2, vendor='Transmeta')
        session.add(cpu)
        self.system.cpu = cpu
    response = requests.get(get_server_base() + 'systems/%s'
                            % self.system.fqdn)
    json = response.json()
    # Table-driven check of each serialized CPU field.
    for field, value in [
            ('cpu_flags', [u'beer', u'frob']),
            ('cpu_cores', 5),
            ('cpu_family', 6),
            ('cpu_model', 7),
            ('cpu_model_name', u'Intel'),
            ('cpu_hyper', True),
            ('cpu_processors', 6),
            ('cpu_sockets', 2),
            ('cpu_speed', 24),
            ('cpu_stepping', 2),
            ('cpu_vendor', 'Transmeta')]:
        self.assertEqual(value, json[field])
def create_job_for_recipes(recipes, owner=None, whiteboard=None, cc=None,
                           product=None, retention_tag=None, group=None,
                           submitter=None, priority=None, **kwargs):
    """
    Test-data helper: wrap the given recipes in a single recipe set and
    create a Job around it, filling defaults for anything not supplied.
    """
    if retention_tag is None:
        retention_tag = RetentionTag.by_tag(
            u'scratch')  # Don't use default, unpredictable
    else:
        retention_tag = RetentionTag.by_tag(retention_tag)
    if owner is None:
        owner = create_user()
    if whiteboard is None:
        whiteboard = unique_name(u'job %s')
    # ttasks is the total task count across all recipes.
    job = Job(whiteboard=whiteboard, ttasks=sum(r.ttasks for r in recipes),
              owner=owner, retention_tag=retention_tag, group=group,
              product=product, submitter=submitter)
    if cc is not None:
        job.cc = cc
    if priority is None:
        priority = TaskPriority.default_priority()
    recipe_set = RecipeSet(ttasks=sum(r.ttasks for r in recipes),
            priority=priority)
    recipe_set.recipes.extend(recipes)
    job.recipesets.append(recipe_set)
    session.add(job)
    session.flush()
    log.debug('Created %s', job.t_id)
    return job
def update_products(xml_file):
    """
    Sync the Product table from a CPE XML file: the text of every <cpe>
    element becomes a Product row, unless one with that name already
    exists.

    Runs inside one explicit transaction; the trailing rollback is a
    no-op after a successful commit but undoes everything on error.
    """
    dom = etree.parse(xml_file)
    cpes = dom.xpath("//cpe")
    session.begin()
    try:
        # A set replaces the original's dict-with-dummy-values; duplicates
        # within the file are recorded (but, as before, not reported here).
        to_add = set()
        dupe_errors = []
        for cpe in cpes:
            cpe_text = cpe.text
            if cpe_text in to_add:
                dupe_errors.append(cpe_text)
            else:
                to_add.add(cpe_text)
        for cpe_to_add in to_add:
            try:
                prod = Product.by_name(u"%s" % cpe_to_add)
            except NoResultFound:
                session.add(Product(u"%s" % cpe_to_add))
        session.commit()
    finally:
        session.rollback()
def test_system_details_includes_cpus(self):
    """The system details JSON must expose every stored CPU attribute."""
    with session.begin():
        cpu = Cpu(
            cores=5,
            family=6,
            model=7,
            model_name="Intel",
            flags=["beer", "frob"],
            processors=6,
            sockets=2,
            speed=24,
            stepping=2,
            vendor="Transmeta",
        )
        session.add(cpu)
        self.system.cpu = cpu
    response = requests.get(get_server_base() + "systems/%s" % self.system.fqdn)
    json = response.json()
    # Table-driven check of each serialized CPU field.
    expected_fields = [
        ("cpu_flags", [u"beer", u"frob"]),
        ("cpu_cores", 5),
        ("cpu_family", 6),
        ("cpu_model", 7),
        ("cpu_model_name", u"Intel"),
        ("cpu_hyper", True),
        ("cpu_processors", 6),
        ("cpu_sockets", 2),
        ("cpu_speed", 24),
        ("cpu_stepping", 2),
        ("cpu_vendor", "Transmeta"),
    ]
    for field, value in expected_fields:
        self.assertEqual(value, json[field])
def _from_csv(cls, system, data, csv_type, log):
    """Populate system.labinfo from the recognised CSV columns."""
    # Keep only the columns this importer understands.
    lab_fields = dict((key, data[key])
                      for key in cls.csv_keys if key in data)
    system.labinfo = LabInfo(**lab_fields)
    session.add(system)
def _from_csv(cls, system, data, csv_type, log):
    """Populate system.labinfo from the recognised CSV columns."""
    # Keep only the columns this importer understands.
    recognised = {}
    for key in cls.csv_keys:
        if key in data:
            recognised[key] = data[key]
    system.labinfo = LabInfo(**recognised)
    session.add(system)
def create_retention_tag(name=None, default=False, needs_product=False):
    """Test-data helper: create and persist a RetentionTag."""
    tag_name = unique_name(u'tag%s') if name is None else name
    tag = RetentionTag(tag_name, is_default=default,
                       needs_product=needs_product)
    session.add(tag)
    return tag
def _handle_package_mask(session, data, machine_id):
    """
    Reconcile the stored Gentoo package-mask relations for *machine_id*
    with data['user_package_mask']: rows no longer present are deleted,
    new (package, atom) pairs are inserted.
    """
    # Find current entries
    try:
        package_mask = data['user_package_mask']
    except KeyError:
        package_mask = {}
    current_package_mask_set = set()
    for package, atoms in package_mask.items():
        for i in atoms:
            key = (package, i)
            current_package_mask_set.add(key)
    # Find old entries
    old_package_mask_rel_objects = session.query(
            GentooPackageMaskRel).options(
            eagerload('package'),
            eagerload('atom')).\
            filter_by(machine_id=machine_id).all()
    old_package_mask_dict = {}
    for e in old_package_mask_rel_objects:
        key = (e.package.name, e.atom.name)
        old_package_mask_dict[key] = e
    old_package_mask_set = set(old_package_mask_dict.keys())
    # Calculate diff
    mask_entries_to_add = current_package_mask_set - old_package_mask_set
    mask_entries_to_remove = old_package_mask_set - current_package_mask_set
    # Resolve diff
    for i in mask_entries_to_remove:
        session.delete(old_package_mask_dict[i])
    if mask_entries_to_remove:
        session.flush()
    for i in mask_entries_to_add:
        package, atom = i
        # Look up (or create) the pooled atom and package rows via
        # generated code filled from _LOOKUP_OR_ADD_TEMPLATE.
        lookup_or_add_jobs = (
            {'thing':'atom', },
            {'thing':'package', },
        )
        for job in lookup_or_add_jobs:
            thing = job['thing']
            details = {
                'class_name':pool_class_name(thing, vector_flag=False),
                'source_var_name':thing,
                'new_object_name':'%s_pool_object' % thing
            }
            program = _LOOKUP_OR_ADD_TEMPLATE % details
            dump_gentoo_python_code(program)
            # HACK/SECURITY: exec of generated source defines the
            # {atom,package}_pool_object locals used below. The template is
            # local, but this pattern is fragile and must never be fed
            # untrusted input.
            exec(program)
        session.flush()
        package_id = package_pool_object.id
        atom_id = atom_pool_object.id
        mask_rel_object = GentooPackageMaskRel(machine_id, package_id, atom_id)
        session.add(mask_rel_object)
def create_group():
    """
    Creates a new user group in Beaker. The request must be
    :mimetype:`application/json`.

    :jsonparam string group_name: Symbolic name for the group.
    :jsonparam string display_name: Human-friendly display name for the
      group.
    :jsonparam string description: Description of the group.
    :jsonparam string root_password: Optional root password for group jobs.
      If this is not set, group jobs will use the root password preferences
      of the job submitter.
    :jsonparam string membership_type: Specifies how group membership is
      populated. Possible values are:

      * normal: Group is initially empty, members are explicitly added and
        removed by group owner.
      * ldap: Membership is populated from the LDAP group with the same
        group name.
      * inverted: Group contains all Beaker users *except* users who have
        been explicitly excluded by the group owner.

    :status 201: The group was successfully created.
    """
    user = identity.current.user
    data = read_json_request(request)
    if 'group_name' not in data:
        raise BadRequest400('Missing group_name key')
    if 'display_name' not in data:
        raise BadRequest400('Missing display_name key')
    # for backwards compatibility
    if data.pop('ldap', False):
        data['membership_type'] = 'ldap'
    try:
        Group.by_name(data['group_name'])
    except NoResultFound:
        pass
    else:
        raise Conflict409("Group '%s' already exists" % data['group_name'])
    with convert_internal_errors():
        group = Group.lazy_create(group_name=data['group_name'])
        group.display_name = data['display_name']
        group.description = data.get('description')
        group.root_password = data.get('root_password')
        session.add(group)
        group.record_activity(user=user, service=u'HTTP',
                field=u'Group', action=u'Created')
        if data.get('membership_type'):
            group.membership_type = GroupMembershipType.from_string(
                data['membership_type'])
        if group.membership_type == GroupMembershipType.ldap:
            group.refresh_ldap_members()
        else:
            # LDAP groups don't have any owners
            group.add_member(user, is_owner=True, agent=identity.current.user)
    response = jsonify(group.__json__())
    response.status_code = 201
    response.headers.add('Location', absolute_url(group.href))
    return response
def create(self, kw):
    """
    Creates a new group.

    The *kw* argument must be an XML-RPC structure (dict)
    specifying the following keys:

        'group_name'
             Group name (maximum 16 characters)
        'display_name'
             Group display name
        'ldap'
             Populate users from LDAP (True/False)

    Returns a message whether the group was successfully created or
    raises an exception on failure.
    """
    display_name = kw.get('display_name')
    group_name = kw.get('group_name')
    ldap = kw.get('ldap')
    password = kw.get('root_password')
    if ldap and not identity.current.user.is_admin():
        raise BX(_(u'Only admins can create LDAP groups'))
    try:
        group = Group.by_name(group_name)
    except NoResultFound:
        #validate
        GroupFormSchema.fields['group_name'].to_python(group_name)
        GroupFormSchema.fields['display_name'].to_python(display_name)
        group = Group()
        session.add(group)
        group.record_activity(user=identity.current.user, service=u'XMLRPC',
                field=u'Group', action=u'Created')
        group.display_name = display_name
        group.group_name = group_name
        group.ldap = ldap
        group.root_password = password
        user = identity.current.user
        if not ldap:
            # Non-LDAP groups: the creator becomes member and owner.
            group.user_group_assocs.append(UserGroup(user=user, is_owner=True))
            group.activity.append(GroupActivity(user, service=u'XMLRPC',
                    action=u'Added', field_name=u'User', old_value=None,
                    new_value=user.user_name))
            group.activity.append(GroupActivity(user, service=u'XMLRPC',
                    action=u'Added', field_name=u'Owner', old_value=None,
                    new_value=user.user_name))
        if group.ldap:
            group.refresh_ldap_members()
        return 'Group created: %s.' % group_name
    else:
        raise BX(_(u'Group already exists: %s.' % group_name))
def _from_csv(cls, system, data, csv_type, log):
    """
    Import data from CSV file into System Objects
    """
    # A key name is mandatory and must already exist.
    if 'key' in data and data['key']:
        try:
            key = Key.by_name(data['key'])
        except InvalidRequestError:
            log.append('%s: Invalid Key %s ' % (system.fqdn, data['key']))
            return False
    else:
        log.append('%s: Key must not be blank!' % system.fqdn)
        return False
    if 'key_value' in data and data['key_value']:
        # Numeric and string key/values live in separate collections.
        if key.numeric:
            system_key_values = system.key_values_int
            try:
                key_value = Key_Value_Int.by_key_value(system, key,
                        data['key_value'])
            except InvalidRequestError:
                key_value = Key_Value_Int(key=key,
                        key_value=data['key_value'])
        else:
            system_key_values = system.key_values_string
            try:
                key_value = Key_Value_String.by_key_value(system, key,
                        data['key_value'])
            except InvalidRequestError:
                key_value = Key_Value_String(key=key,
                        key_value=data['key_value'])
    else:
        log.append('%s: Key Value must not be blank!' % system.fqdn)
        return False
    deleted = False
    if 'deleted' in data:
        deleted = smart_bool(data['deleted'])
    if deleted:
        if key_value in system_key_values:
            system.record_activity(user=identity.current.user,
                    service=u'CSV', action=u'Removed', field=u'Key/Value',
                    old=u'%s/%s' % (data['key'], data['key_value']), new=u'')
            system_key_values.remove(key_value)
            # A never-flushed value has no id; drop it from the session
            # instead of deleting a nonexistent row.
            if not key_value.id:
                session.expunge(key_value)
    else:
        if key_value not in system_key_values:
            system.record_activity(user=identity.current.user,
                    service=u'CSV', action=u'Added', field=u'Key/Value',
                    old=u'', new=u'%s/%s' % (data['key'], data['key_value']))
            system_key_values.append(key_value)
            session.add(key_value)
    return True
def create_task(name=None, exclude_arches=None, exclusive_arches=None,
        exclude_osmajors=None, exclusive_osmajors=None, version=u'1.0-1',
        uploader=None, owner=None, priority=u'Manual', valid=None, path=None,
        description=None, requires=None, runfor=None, type=None, avg_time=1200):
    """
    Create, persist, and return a Task fixture for tests.

    Any argument left as None gets a generated default (unique name,
    derived path/description, a freshly created uploader user, a derived
    owner e-mail). The list-valued arguments (exclude_arches,
    exclusive_arches, exclude_osmajors, exclusive_osmajors, requires,
    runfor, type) attach the corresponding related records when given.
    """
    if name is None:
        name = unique_name(u'/distribution/test_task_%s')
    if path is None:
        path = u'/mnt/tests/%s' % name
    if description is None:
        description = unique_name(u'description%s')
    if uploader is None:
        uploader = create_user(user_name=u'task-uploader%s' % name.replace('/', '-'))
    if owner is None:
        # Bug fix: the format string had no %s placeholder, so applying the
        # % operator raised "TypeError: not all arguments converted during
        # string formatting". Interpolate the task name into the owner
        # address as originally intended.
        owner = u'task-owner-%s@example.com' % name.replace('/', '-')
    if valid is None:
        valid = True
    rpm = u'example%s-%s.noarch.rpm' % (name.replace('/', '-'), version)
    task = Task(name=name)
    task.rpm = rpm
    task.version = version
    task.uploader = uploader
    task.owner = owner
    task.priority = priority
    task.valid = valid
    task.path = path
    task.description = description
    task.avg_time = avg_time
    task.license = u'GPLv99+'
    if type:
        for t in type:
            task.types.append(TaskType.lazy_create(type=t))
    if exclude_arches:
        for arch in exclude_arches:
            task.excluded_arches.append(Arch.by_name(arch))
    if exclusive_arches:
        for arch in exclusive_arches:
            task.exclusive_arches.append(Arch.by_name(arch))
    if exclude_osmajors:
        for osmajor in exclude_osmajors:
            task.excluded_osmajors.append(OSMajor.lazy_create(osmajor=osmajor))
    if exclusive_osmajors:
        for osmajor in exclusive_osmajors:
            task.exclusive_osmajors.append(OSMajor.lazy_create(osmajor=osmajor))
    if requires:
        for require in requires:
            tp = TaskPackage.lazy_create(package=require)
            task.required.append(tp)
    if runfor:
        for run in runfor:
            task.runfor.append(TaskPackage.lazy_create(package=run))
    session.add(task)
    session.flush()
    log.debug('Created task %s', task.name)
    return task
def save(self, **kw):
    """
    Create or update a Key from submitted form parameters.

    When kw carries a truthy 'id', the existing Key is loaded and renamed;
    otherwise a new Key is created and added to the session. An optional
    'numeric' parameter sets the key's numeric flag. Flashes a
    confirmation and redirects back to the listing.
    """
    # Use .get() so a form submission without an 'id' field creates a new
    # key instead of raising KeyError; this matches the other save
    # handlers in this file, and preserves the old behaviour of treating
    # an empty id as "create".
    if kw.get('id'):
        key = Key.by_id(kw['id'])
        key.key_name = kw['key_name']
    else:
        key = Key(key_name=kw['key_name'])
        session.add(key)
    if 'numeric' in kw:
        key.numeric = kw['numeric']
    flash( _(u"OK") )
    redirect(".")
def save(self, **kw):
    """
    Create or update an ExternalReport from submitted form parameters.

    A truthy 'id' selects an existing report to update; otherwise a new
    one is created. Name, URL, and description are copied from the form,
    then the user is redirected back to the listing.
    """
    report_id = kw.get('id')
    if report_id:
        report = ExternalReport.by_id(report_id)
    else:
        report = ExternalReport()
    for attr in ('name', 'url', 'description'):
        setattr(report, attr, kw.get(attr))
    session.add(report)
    flash(_(u"%s saved" % report.name))
    redirect(".")
def _create_test_user():
    """Persist and return a canned test user ("Mr Creosote")."""
    user = User()
    user.user_name = u"creosote"
    user.email_address = u"*****@*****.**"
    user.display_name = u"Mr Creosote"
    user.password = u"Wafer-thin Mint"
    session.add(user)    # mark object as 'to be saved'
    session.flush()      # push the pending insert to the database now
    return user
def create(self, kw):
    """
    Creates a new group.

    The *kw* argument must be an XML-RPC structure (dict) specifying the
    following keys:

        'group_name'
             Group name (maximum 16 characters)
        'display_name'
             Group display name
        'description'
             Group description
        'ldap'
             Populate users from LDAP (True/False)

    Returns a message whether the group was successfully created or
    raises an exception on failure.
    """
    display_name = kw.get('display_name')
    group_name = kw.get('group_name')
    description = kw.get('description')
    ldap = kw.get('ldap')
    password = kw.get('root_password')
    requester = identity.current.user
    # LDAP groups need both admin rights and LDAP support turned on.
    if ldap:
        if not requester.is_admin():
            raise BX(_(u'Only admins can create LDAP groups'))
        if not config.get("identity.ldap.enabled", False):
            raise BX(_(u'LDAP is not enabled'))
    # Reject duplicates up front; NoResultFound means the name is free.
    try:
        Group.by_name(group_name)
    except NoResultFound:
        pass
    else:
        raise BX(_(u'Group already exists: %s.' % group_name))
    group = Group()
    session.add(group)
    group.record_activity(user=requester, service=u'XMLRPC',
            field=u'Group', action=u'Created')
    group.display_name = display_name
    group.group_name = group_name
    group.description = description
    group.root_password = password
    if ldap:
        # LDAP groups pull their membership from the directory.
        group.membership_type = GroupMembershipType.ldap
        group.refresh_ldap_members()
    else:
        # Non-LDAP groups start with the requester as their owner.
        group.add_member(requester, is_owner=True, service=u'XMLRPC',
                agent=requester)
    return 'Group created: %s.' % group_name