def reject(self, person_name):
    """Reject a user's FPCA. This method will remove a user from the FPCA group and any other groups that they are in that require the FPCA. It is used when a person has to fulfill some more legal requirements before having a valid FPCA. Arguments :person_name: Name of the person to reject. """
    # NOTE(review): 'show' is built but never used in the visible code --
    # presumably consumed by a template decorator; confirm against caller.
    show = {}
    show["show_postal_address"] = config.get("show_postal_address")
    exc = None
    user = People.by_username(turbogears.identity.current.user_name)
    if not is_admin(user):
        # Only admins can use this
        turbogears.flash(_("You are not allowed to reject FPCAs."))
        exc = "NotAuthorized"
    else:
        # Unapprove the cla and all dependent groups
        person = People.by_username(person_name)
        for role in person.roles:
            if self._cla_dependent(role.group):
                role.role_status = "unapproved"
        # Persist the demotions; surface DB errors to the user via flash
        # rather than letting them propagate.
        try:
            session.flush()
        except DBAPIError, error:
            turbogears.flash(
                _("Error removing cla and dependent groups"
                  " for %(person)s\n Error was: %(error)s")
                % {"person": person_name, "error": str(error)}
            )
            exc = "DBAPIError"
def rate(self, query_id, rating, *args, **kwargs):
    '''
    Rates the chat. Updates user score and statistics.

    @param query_id: needed to identify chat session
    @param rating: points given
    '''
    cs = ChatSession.select(ChatSession.c.query_id==query_id)[0];
    # Only the owner of the chat session is allowed to rate it.
    if not cs.user_id == identity.current.user.user_id:
        return dict()
    cs.rating = int(rating)
    cs.status = "RATED"
    session.save(cs);
    session.flush();
    expert=session.query(User).get_by(user_id=int(cs.expert_id))
    stats = expert.user_stats
    # Recover the previous ratings total from the stored (rounded) average,
    # then fold the new rating into the running mean.
    new_value=round(stats.average_rating*stats.no_of_ques_answered_rated)
    stats.no_of_ques_answered_rated+=1
    stats.average_rating=(new_value+float(rating))/stats.no_of_ques_answered_rated
    # Overall score combines question ratings and blog ratings.
    stats.score=round(stats.no_of_ques_answered_rated * stats.average_rating
            + stats.no_of_blog_ratings * stats.average_blog_rating)
    session.flush();
    return dict()
def send(self, query_id, *args, **kwargs):
    '''
    Posts a chat message.

    @param query_id: needed to identify chat session to which chat message should be appended
    @param **kwargs: contains the text of the message
    '''
    cs = ChatSession.select(ChatSession.c.query_id==query_id)[0];
    text = ""
    try:
        text = kwargs["text"]
        # Auto-link URLs: first wrap explicit http:// links, then bare
        # www. hosts (negative lookbehind avoids double-wrapping the
        # www part of URLs already handled by the first pass).
        text = re.sub("http://\S+","<a href=\"\g<0>\" target=\"_blank\">\g<0></a>",text);
        text = re.sub("(?<!http://)www\.\S+","<a href=\"http://\g<0>\" target=\"_blank\">\g<0></a>",text);
    except:
        print "No text send in chat"
        pass
    #limit to field max size
    text = text[:2000]
    cm = ChatMessage(session_id=cs.session_id,
                     user_id = identity.current.user.user_id,
                     type = "POSTED",
                     text = text)
    session.flush();
    return dict()
def _handle_pbp(session, data, machine_id):
    """Store the MythTV playback-profile data for a machine.

    Replaces any previously recorded playback profile row for
    *machine_id* with the one found under
    ``data['features']['playbackprofile']``, falling back to "unknown"
    values when the payload lacks that data.

    :param session: database session used for delete/add/flush
    :param data: decoded submission payload (nested dicts)
    :param machine_id: identifier of the submitting machine
    """
    name = "unknown"
    profiles = ['unknown']
    # Narrowed from bare except: a missing key (KeyError) or a non-dict
    # payload (TypeError) falls back to defaults; anything else (e.g.
    # KeyboardInterrupt) now propagates instead of being swallowed.
    try:
        myth_pb = data['features']['playbackprofile']
    except (KeyError, TypeError):
        myth_pb = {}
    try:
        name = myth_pb['name']
    except (KeyError, TypeError):
        pass
    try:
        profiles = myth_pb['profiles']
    except (KeyError, TypeError):
        pass
    # Remove old entry so each machine keeps at most one row
    session.query(mythtvPbp).filter_by(machine_id=machine_id).delete()
    # Add new entry
    session.add(mythtvPbp(machine_id, name, profiles))
    session.flush()
def system_command(fqdn):
    """Run a power/netboot command against the system named *fqdn*.

    The request body (JSON or form-encoded) must carry an ``action``
    key: 'on', 'off', 'interrupt' or 'clear_netboot'.  'reboot' is
    rejected with a hint, anything else is a 400.  Returns the JSON
    representation of the queued command.
    """
    system = _get_system_by_FQDN(fqdn)
    if not system.lab_controller:
        raise BadRequest400('System is not attached to a lab controller')
    if not system.can_power(identity.current.user):
        raise Forbidden403('You do not have permission to control this system')
    # We accept JSON or form-encoded for convenience
    if request.json:
        payload, missing_msg = request.json, 'Missing action key'
    elif request.form:
        payload, missing_msg = request.form, 'Missing action parameter'
    else:
        raise UnsupportedMediaType415
    if 'action' not in payload:
        raise BadRequest400(missing_msg)
    action = payload['action']
    if action == 'reboot':
        raise BadRequest400('"reboot" is not a valid power command, '
                'send "off" followed by "on" instead')
    if action in ('on', 'off', 'interrupt'):
        if not system.power:
            raise BadRequest400('System is not configured for power support')
        command = system.action_power(service=u'HTTP', action=action)
    elif action == 'clear_netboot':
        command = system.clear_netboot(service=u'HTTP')
    else:
        raise BadRequest400('Unknown action %r' % action)
    session.flush()  # for created attribute
    return jsonify(command.__json__())
def test_systems_by_cpu_type(self):
    # Two systems get a rare CPU and twenty get a common one; the report
    # should emit one aggregated row per distinct CPU tuple, plus a
    # leading row for un-inventoried systems.
    vendor = u'AcmeCorp'
    common_cpu_family = 1
    common_cpu_stepping = 2
    rare_cpu_family = 3
    rare_cpu_stepping = 4
    for _ in range(2):
        data_setup.create_system().cpu = Cpu(vendor=vendor, model=1,
                family=rare_cpu_family, stepping=rare_cpu_stepping)
    for _ in range(20):
        data_setup.create_system().cpu = Cpu(vendor=vendor, model=1,
                family=common_cpu_family, stepping=common_cpu_stepping)
    # Removed systems must not be counted at all.
    data_setup.create_system(status=SystemStatus.removed).cpu = \
        Cpu(vendor=vendor)
    data_setup.create_system().cpu = None # un-inventoried
    session.flush()
    rows = list(self.execute_reporting_query('system-count-by-cpu'))
    # un-inventoried systems should show up at the top
    self.assertEquals(rows[0].cpu_vendor, None)
    self.assertEquals(rows[0].cpu_model, None)
    self.assertEquals(rows[0].cpu_family, None)
    self.assertEquals(rows[0].cpu_stepping, None)
    # >= 1 because other tests may also leave un-inventoried systems.
    self.assert_(rows[0].system_count >= 1, rows[0])
    # check for fake CPUs created above
    vendor_rows = [row for row in rows if row.cpu_vendor == vendor]
    self.assertEquals(len(vendor_rows), 2, vendor_rows)
    self.assertEquals(vendor_rows[0].cpu_model, 1)
    self.assertEquals(vendor_rows[0].cpu_family, common_cpu_family)
    self.assertEquals(vendor_rows[0].cpu_stepping, common_cpu_stepping)
    self.assertEquals(vendor_rows[0].system_count, 20)
    self.assertEquals(vendor_rows[1].cpu_model, 1)
    self.assertEquals(vendor_rows[1].cpu_family, rare_cpu_family)
    self.assertEquals(vendor_rows[1].cpu_stepping, rare_cpu_stepping)
    self.assertEquals(vendor_rows[1].system_count, 2)
def test_job_priority_changes(self):
    """Priority-change activity should be counted per submitting user.

    user1 changes priority on two jobs, user2 on one; job4 exists but
    is never changed, so it must not affect the counts.
    """
    user1 = data_setup.create_user()
    user2 = data_setup.create_user()
    job1 = data_setup.create_job(owner=user1)
    job2 = data_setup.create_job(owner=user1)
    job3 = data_setup.create_job(owner=user2)
    job4 = data_setup.create_job(owner=user2)
    for j in [job1, job2, job3]:
        for rs in j.recipesets:
            activity = RecipeSetActivity(j.owner, 'TEST', 'Changed',
                    'Priority', rs.priority.value, TaskPriority.high)
            activity.created = datetime.datetime(year=2012, month=10, day=10)
            rs.activity.append(activity)
    session.flush()
    rows = self.execute_reporting_query('job-priority-changes-by-user')
    all_rows = rows.fetchall()
    user1_rows = [row for row in all_rows
                  if row.user_name == user1.user_name]
    user2_rows = [row for row in all_rows
                  if row.user_name == user2.user_name]
    # BUG FIX: assertTrue(x, 2) treated 2 as the failure *message* and
    # passed for any truthy x; the intent is an equality check.
    self.assertEquals(user1_rows[0].number_of_jobs_changed, 2)
    self.assertEquals(user2_rows[0].number_of_jobs_changed, 1)
def promote_pending_user(self, pending_user): ''' Promotes pending user to the official 'users'. @param pending_user: The pending user object @return: the new user object. ''' # Let's try to do this programmatically. The only thing you should have to modify # if you changed the schema fo RegistrationPendingUser is the 'excluded' list. All # columns not in this list will be mapped straight to a new user object. # This list contains the columns from RegistrationPendingUser that you DON'T want # to migrate excluded = ['created', 'validation_key'] columns = pending_user.c.keys() # list of column names new_columns = dict() for c in columns: if c not in excluded: new_columns[c] = getattr(pending_user, c) UserClass = register_model.user_class_finder.user_class new_user = UserClass(**new_columns) if new_user: new_user.user_stats = UserStats() session.flush() return new_user
def _handle_historical(session, data, myth_uuid):
    """Store the MythTV 'historical' feature counters for a submission.

    Replaces any previously recorded historical row for *myth_uuid*
    with the counters found under ``data['features']['historical']``,
    defaulting each counter to 0 when absent.

    :param session: database session used for delete/add/flush
    :param data: decoded submission payload (nested dicts)
    :param myth_uuid: UUID identifying the MythTV installation
    """
    showcount = 0
    rectime = 0
    db_age = 0
    reccount = 0
    # Narrowed from bare except: missing keys (KeyError) or a non-dict
    # payload (TypeError) fall back to defaults; anything else now
    # propagates instead of being silently swallowed.
    try:
        myth_hist = data['features']['historical']
    except (KeyError, TypeError):
        myth_hist = {}
    # Remove the old row so each installation keeps at most one entry.
    session.query(mythtvHistorical).filter_by(myth_uuid=myth_uuid).delete()
    try:
        showcount = myth_hist['showcount']
    except (KeyError, TypeError):
        pass
    try:
        rectime = myth_hist['rectime']
    except (KeyError, TypeError):
        pass
    try:
        db_age = myth_hist['db_age']
    except (KeyError, TypeError):
        pass
    try:
        reccount = myth_hist['reccount']
    except (KeyError, TypeError):
        pass
    session.add(
        mythtvHistorical(myth_uuid, showcount, rectime, db_age, reccount))
    session.flush()
def test_machine_hours(self): user = data_setup.create_user() # recipes/reservations straddle the boundary of the reporting period # to test we clamp them properly data_setup.create_completed_job( owner=user, distro_tree=data_setup.create_distro_tree(arch=u'ia64'), start_time=datetime.datetime(2012, 9, 30, 23, 0, 0), finish_time=datetime.datetime(2012, 10, 1, 1, 0, 0)) data_setup.create_manual_reservation( user=user, system=data_setup.create_system(arch=u'ia64'), start=datetime.datetime(2012, 10, 31, 22, 30, 0), finish=datetime.datetime(2012, 11, 1, 1, 0, 0)) data_setup.create_completed_job( owner=user, distro_tree=data_setup.create_distro_tree(arch=u'ppc64'), start_time=datetime.datetime(2012, 9, 30, 20, 0, 0), finish_time=datetime.datetime(2012, 10, 1, 2, 0, 0)) data_setup.create_manual_reservation( user=user, system=data_setup.create_system(arch=u'ppc64'), start=datetime.datetime(2012, 10, 31, 23, 0, 0), finish=datetime.datetime(2012, 11, 1, 10, 0, 0)) session.flush() rows = self.execute_reporting_query('machine-hours-by-user-arch') user_rows = [row for row in rows if row.username == user.user_name] self.assertEquals(len(user_rows), 2, user_rows) self.assertEquals(user_rows[0].arch, 'ia64') self.assertEquals(user_rows[0].machine_hours, Decimal('2.5')) self.assertEquals(user_rows[1].arch, 'ppc64') self.assertEquals(user_rows[1].machine_hours, Decimal('3.0'))
def create_default_user(user_name, password=None): """Create a default user.""" try: u = User.by_user_name(user_name) except: u = None if u: print "User '%s' already exists in database." % user_name return from getpass import getpass from sys import stdin while password is None: try: password = getpass("Enter password for user '%s': " % user_name.encode(stdin.encoding)).strip() password2 = getpass("Confirm password: "******"Passwords do not match." else: password = password.decode(stdin.encoding) break except (EOFError, KeyboardInterrupt): print "User creation cancelled." return u = User() u.user_name = user_name u.display_name = u"Default User" u.email_address = u"*****@*****.**" % user_name u.password = password session.add(u) session.flush() print "User '%s' created." % user_name
def test_machine_hours(self): user = data_setup.create_user() # recipes/reservations straddle the boundary of the reporting period # to test we clamp them properly data_setup.create_completed_job(owner=user, distro_tree=data_setup.create_distro_tree(arch=u'ia64'), start_time=datetime.datetime(2012, 9, 30, 23, 0, 0), finish_time=datetime.datetime(2012, 10, 1, 1, 0, 0)) data_setup.create_manual_reservation(user=user, system=data_setup.create_system(arch=u'ia64'), start=datetime.datetime(2012, 10, 31, 22, 30, 0), finish=datetime.datetime(2012, 11, 1, 1, 0, 0)) data_setup.create_completed_job(owner=user, distro_tree=data_setup.create_distro_tree(arch=u'ppc64'), start_time=datetime.datetime(2012, 9, 30, 20, 0, 0), finish_time=datetime.datetime(2012, 10, 1, 2, 0, 0)) data_setup.create_manual_reservation(user=user, system=data_setup.create_system(arch=u'ppc64'), start=datetime.datetime(2012, 10, 31, 23, 0, 0), finish=datetime.datetime(2012, 11, 1, 10, 0, 0)) session.flush() rows = self.execute_reporting_query('machine-hours-by-user-arch') user_rows = [row for row in rows if row.username == user.user_name] self.assertEquals(len(user_rows), 2, user_rows) self.assertEquals(user_rows[0].arch, 'ia64') self.assertEquals(user_rows[0].machine_hours, Decimal('2.5')) self.assertEquals(user_rows[1].arch, 'ppc64') self.assertEquals(user_rows[1].machine_hours, Decimal('3.0'))
def test_abort_recipe_bubbles_status_to_job(self):
    # A job with two single-recipe recipe sets: aborting one recipe must
    # abort its tasks immediately, but the job only becomes aborted once
    # every recipe set has finished.
    xmljob = lxml.etree.fromstring('''
        <job>
            <whiteboard>job </whiteboard>
            <recipeSet>
                <recipe>
                    <distroRequires>
                        <distro_name op="=" value="BlueShoeLinux5-5" />
                    </distroRequires>
                    <hostRequires/>
                    <task name="/distribution/check-install" role="STANDALONE">
                        <params/>
                    </task>
                </recipe>
            </recipeSet>
            <recipeSet>
                <recipe>
                    <distroRequires>
                        <distro_name op="=" value="BlueShoeLinux5-5" />
                    </distroRequires>
                    <hostRequires/>
                    <task name="/distribution/check-install" role="STANDALONE">
                        <params/>
                    </task>
                </recipe>
            </recipeSet>
        </job>
        ''')
    job = self.controller.process_xmljob(xmljob, self.user)
    session.flush()
    # Drive every recipe through the scheduling states up to Waiting.
    for recipeset in job.recipesets:
        for recipe in recipeset.recipes:
            recipe.process()
            recipe.queue()
            recipe.schedule()
            recipe.waiting()
    # Abort the first recipe.
    job.recipesets[0].recipes[0].abort()
    job.update_status()
    # Verify that it and its children are aborted.
    self.assertEquals(job.recipesets[0].recipes[0].status,
                      TaskStatus.aborted)
    for task in job.recipesets[0].recipes[0].tasks:
        self.assertEquals(task.status, TaskStatus.aborted)
    # Verify that the second recipe and its children are still waiting.
    self.assertEquals(job.recipesets[1].recipes[0].status,
                      TaskStatus.waiting)
    for task in job.recipesets[1].recipes[0].tasks:
        self.assertEquals(task.status, TaskStatus.waiting)
    # Verify that the job still shows waiting.
    self.assertEquals(job.status, TaskStatus.waiting)
    # Abort the second recipe now.
    job.recipesets[1].recipes[0].abort()
    job.update_status()
    # Verify that the whole job shows aborted now.
    self.assertEquals(job.status, TaskStatus.aborted)
def create_job_for_recipesets(recipesets, owner=None, whiteboard=None,
        cc=None, product=None, retention_tag=None, group=None,
        submitter=None, **kwargs):
    """Build and persist a Job wrapping the given recipe sets.

    Unpredictable defaults are filled in for the caller: a 'scratch'
    retention tag, a freshly created owner and a unique whiteboard.
    Returns the flushed Job.
    """
    if retention_tag is None:
        # Don't use default, unpredictable
        tag = RetentionTag.by_tag(u'scratch')
    else:
        tag = RetentionTag.by_tag(retention_tag)
    job_owner = owner if owner is not None else create_user()
    board = whiteboard if whiteboard is not None else unique_name(u'job %s')
    total_tasks = sum(rs.ttasks for rs in recipesets)
    job = Job(whiteboard=board, ttasks=total_tasks, owner=job_owner,
              retention_tag=tag, group=group, product=product,
              submitter=submitter)
    if cc is not None:
        job.cc = cc
    job.recipesets.extend(recipesets)
    session.add(job)
    session.flush()
    log.debug('Created %s', job.t_id)
    return job
def test_log_not_delete(self):
    # Job that is not within it's expiry time
    with session.begin():
        job_not_delete = data_setup.create_completed_job(
            start_time=datetime.datetime.utcnow() - datetime.timedelta(days=60),
            finish_time=datetime.datetime.utcnow() - datetime.timedelta(days=29))
        job_not_delete.recipesets[0].recipes[0].logs.append(
            LogRecipe(filename=u'test.log'))
    r_not_delete = job_not_delete.recipesets[0].recipes[0]
    dir_not_delete = os.path.join(r_not_delete.logspath,
                                  r_not_delete.filepath)
    self.make_dir(dir_not_delete)
    # Create the log file on disk so deletion would be observable.
    ft = open(os.path.join(dir_not_delete, 'test.log'), 'w')
    ft.close()
    session.flush()
    run_command('log_delete.py', 'beaker-log-delete')
    # self.job_to_delete (presumably created in setUp) should have been
    # purged, so asserting its logs are still present must fail.
    self.assertRaises(AssertionError, self._assert_logs_not_in_db,
                      self.job_to_delete)
    # The unexpired job's directory must survive: check_dir_not_there is
    # expected to raise AssertionError because the dir still exists.
    try:
        self.check_dir_not_there(dir_not_delete)
        raise Exception('%s was deleted when it shold not have been'
                        % dir_not_delete)
    except AssertionError:
        pass
def isOnline(self):
    '''
    @return: Boolean (true if user is online).
    '''
    user_stats = self.user_stats
    # reload if status is old
    if not user_stats.isOnlineLastUpdated or user_stats.isOnlineLastUpdated < datetime.now() - timedelta(seconds = constants.isOnlineExpire):
        t=[]
        # Collect expiry times of all visits joined to this user's
        # identity (legacy SQLAlchemy query().select() API).
        visit=session.query(Visit).select(and_(VisitIdentity.c.visit_key == Visit.c.visit_key, self.user_id == VisitIdentity.c.user_id))
        isOnline = 0
        for v in visit:
            t.append(v.expiry)
        t.sort()
        t.reverse()
        # Online iff the most recent visit has not yet expired.
        if len(t) > 0 and datetime.now()<t[0]:
            isOnline = 1
        # Cache the result so the next calls within the expiry window
        # skip the visit query.
        user_stats.isOnline = isOnline
        user_stats.isOnlineLastUpdated = datetime.now()
        session.save(user_stats)
        session.flush()
    return user_stats.isOnline == 1
def rate_object(self, **kwargs):
    # Rates either a host ("Host<uuid>") or one of its devices
    # ("Host<uuid>@Device<device_id>"), identified by kwargs['ratingID'].
    #log.info('args = %s' % str(args))
    #log.info('kwargs = %s' % str(kwargs))
    id = kwargs.get("ratingID")
    rating = kwargs.get("value")
    print "ID: %s" % id
    print "RATING: %s" % rating
    if id.startswith("Host"):
        sep = id.find("@")
        if sep == -1:
            # Plain host rating: everything after "Host" is the uuid.
            host_id = id[4:]
            host = session.query(Host).filter_by(uuid=host_id).one()
            host.rating = int(rating)
            session.flush()
            return dict()
        # Compound id: split into host uuid and trailing "Device<id>" part.
        host_id = id[4:sep]
        id = id[sep+1:]
    if id.startswith("Device"):
        device_id = int(id[6:])
        host = session.query(Host).filter_by(uuid=host_id).one()
        for device in host.devices:
            if device.device_id == device_id:
                device.rating = int(rating)
                session.flush([host, device])
                return dict()
    return dict()
def add_json_plus_pub_uuid(self, uuid, pub_uuid, host, token, smolt_protocol):
    """Validate and store a submission that carries a public UUID.

    Runs the standard add_json validation, processes the submission,
    then records a BatchJob audit entry flagged as added.  Returns the
    submission handler's result.
    """
    self._run_add_json_checks(uuid, host, token, smolt_protocol)
    result = self.handle_submission(uuid, pub_uuid, host)
    audit_entry = BatchJob(host, uuid, added=True)
    session.add(audit_entry)
    session.flush()
    return result
def rate_object(self, **kwargs):
    # Rates either a host ("Host<uuid>") or one of its devices
    # ("Host<uuid>@Device<device_id>"), identified by kwargs['ratingID'].
    #log.info('args = %s' % str(args))
    #log.info('kwargs = %s' % str(kwargs))
    id = kwargs.get("ratingID")
    rating = kwargs.get("value")
    print "ID: %s" % id
    print "RATING: %s" % rating
    if id.startswith("Host"):
        sep = id.find("@")
        if sep == -1:
            # Plain host rating: everything after "Host" is the uuid.
            host_id = id[4:]
            host = session.query(Host).filter_by(uuid=host_id).one()
            host.rating = int(rating)
            session.flush()
            return dict()
        # Compound id: split into host uuid and trailing "Device<id>" part.
        host_id = id[4:sep]
        id = id[sep + 1:]
    if id.startswith("Device"):
        device_id = int(id[6:])
        host = session.query(Host).filter_by(uuid=host_id).one()
        for device in host.devices:
            if device.device_id == device_id:
                device.rating = int(rating)
                session.flush([host, device])
                return dict()
    return dict()
def validate_email_change(self, email, key):
    '''
    Validates the email address change and update the database appropriately.

    @param email: The email to be verified
    @param key: The verification key
    @return: Data for template population
    '''
    admin_email = config.get('registration.mail.admin_email')
    change_request = register_model.RegistrationUserEmailChange.get_by_new_email(email)
    # Unknown address or mismatched key: report failure and let the
    # template point the user at the admin contact.
    if not change_request or change_request.validation_key != key:
        return dict(is_valid=False, admin_email=admin_email)
    account = change_request.user
    # change the user's email address and delete the email_change record
    account.email_address = email
    session.save(account)
    session.flush()
    change_request.destroy_self()
    return dict(is_valid=True, email=email, name=account.display_name,
                admin_email=admin_email)
def deleteQuery(self, query_id, *args, **kwargs):
    '''
    Allows user to delete a query. Updates query logging.

    @param query_id: identifies the query
    @return: status of attempted delete operation of the query
    '''
    query = session.query(Query).get_by(query_id=int(query_id))
    status = ""
    if not query:
        status = "Query not found"
    elif session.query(ChatSession).get_by(query_id=int(query_id)):
        # A chat session already exists for this query; too late to delete.
        status = "Chat already started"
    elif query.user_id != identity.current.user.user_id:
        # Only the query's owner may delete it.
        status = "Permission denied"
    else:
        # Detach experts, write the audit log entry, then delete the
        # query itself (two flushes: log first, delete second).
        query.experts[:] = []
        query_log=QueryLog( query_id = int(query_id),
                user_id = query.user_id,
                user_name = session.query(User).get_by(user_id=query.user_id).user_name,
                created = datetime.now(),
                status = 'Deleted')
        session.save(query_log)
        session.flush()
        session.delete(query);
        session.flush();
    return dict(status=status)
def test_mark_note_as_deleted(self):
    # Notes never get actually deleted, they just get marked as "deleted"
    # which hides them by default in the UI. "Obsoleted" would be a better
    # word but "deleted" is what we have.
    with session.begin():
        note_text = u'some obsolete info'
        self.system.notes.append(Note(text=note_text, user=self.owner))
        session.flush()
        note_id = self.system.notes[0].id
    s = requests.Session()
    requests_login(s, user=self.owner.user_name, password=u'owner')
    # PATCHing deleted='now' should stamp the note with the current time.
    response = patch_json(get_server_base() + 'systems/%s/notes/%s'
            % (self.system.fqdn, note_id), session=s,
            data={'deleted': 'now'})
    response.raise_for_status()
    self.assertEquals(response.json()['id'], note_id)
    # The response carries the new deletion timestamp...
    assert_datetime_within(
        datetime.datetime.strptime(response.json()['deleted'],
                                   '%Y-%m-%d %H:%M:%S'),
        reference=datetime.datetime.utcnow(),
        tolerance=datetime.timedelta(seconds=10))
    # ...and it is persisted on the note row itself.
    with session.begin():
        session.refresh(self.system.notes[0])
        assert_datetime_within(self.system.notes[0].deleted,
            reference=datetime.datetime.utcnow(),
            tolerance=datetime.timedelta(seconds=10))
def _create_labcontroller_helper(data):
    """Create a new lab controller (and its service account) from *data*.

    Expects 'fqdn' and 'user_name' keys; 'email_address' and 'password'
    are optional and default to the existing user's values.  Returns a
    201 JSON response with the new lab controller, or raises
    Conflict409 if the FQDN is already taken.
    """
    with convert_internal_errors():
        if LabController.query.filter_by(fqdn=data['fqdn']).count():
            raise Conflict409('Lab Controller %s already exists' % data['fqdn'])
        user = find_user_or_create(data['user_name'])
        user = update_user(
            user=user,
            display_name=data['fqdn'],
            email_address=data.get('email_address', user.email_address),
            password=data.get('password', user.password)
        )
        labcontroller = LabController(fqdn=data['fqdn'], disabled=False)
        # Record every initial field value in the activity log.
        labcontroller.record_activity(
            user=identity.current.user, service=u'HTTP',
            action=u'Changed', field=u'FQDN', old=u'', new=data['fqdn'])
        labcontroller.user = user
        labcontroller.record_activity(
            user=identity.current.user, service=u'HTTP',
            action=u'Changed', field=u'User', old=u'', new=user.user_name)
        # For backwards compatibility
        labcontroller.record_activity(
            user=identity.current.user, service=u'HTTP',
            action=u'Changed', field=u'Disabled', old=u'',
            new=unicode(labcontroller.disabled))
        session.add(labcontroller)
        # flush it so we return an id, otherwise we'll end up back in here from
        # the edit form
        session.flush()
        response = jsonify(labcontroller.__json__())
        response.status_code = 201
        return response
def add_ssh_public_key(username):
    """
    Adds a new SSH public key for the given user account.

    Accepts mimetype:`text/plain` request bodies containing the SSH public
    key in the conventional OpenSSH format: <keytype> <key> <ident>.

    :param username: The user's username.
    """
    account = _get_user(username)
    if not account.can_edit(identity.current.user):
        raise Forbidden403('Cannot edit user %s' % account)
    if request.mimetype != 'text/plain':
        raise UnsupportedMediaType415('Request content type must be text/plain')
    with convert_internal_errors():
        keytext = request.data.strip()
        if '\n' in keytext:
            raise ValueError('SSH public keys may not contain newlines')
        # Split into at most three fields: keytype, key material, ident.
        parts = keytext.split(None, 2)
        if len(parts) != 3:
            raise ValueError('Invalid SSH public key')
        pubkey = SSHPubKey(*parts)
        account.sshpubkeys.append(pubkey)
        session.flush()  # to populate id
        return jsonify(pubkey.__json__())
def remove(self, **kw):
    """Clear the config item identified by kw['id'], then redirect.

    Clearing is modelled as setting a None value, attributed to the
    current user, so the change shows up in the item's history.
    """
    config_item = ConfigItem.by_id(kw['id'])
    # A (None, None) set records the item as cleared by this user.
    config_item.set(None, None, identity.current.user)
    session.add(config_item)
    session.flush()
    flash(_(u"%s cleared") % config_item.description)
    raise redirect(".")
def save(self, **kw):
    # Persist a retention tag from the submitted form values.  On
    # failure the error is logged and a flash message is shown; note
    # the exception is swallowed here (no re-raise, no redirect).
    try:
        RetentionTagUtility.save_tag(**kw)
        session.flush()
    except Exception, e:
        log.error('Error inserting tag: %s and default: %s' % (kw.get('tag'), kw.get('default_')))
        flash(_(u"Problem saving tag %s" % kw.get('tag')))
def test_job_priority_changes(self):
    """Priority-change activity should be counted per submitting user.

    user1 changes priority on two jobs, user2 on one; job4 exists but
    is never changed, so it must not affect the counts.
    """
    user1 = data_setup.create_user()
    user2 = data_setup.create_user()
    job1 = data_setup.create_job(owner=user1)
    job2 = data_setup.create_job(owner=user1)
    job3 = data_setup.create_job(owner=user2)
    job4 = data_setup.create_job(owner=user2)
    for j in [job1, job2, job3]:
        for rs in j.recipesets:
            activity = RecipeSetActivity(j.owner, 'TEST', 'Changed',
                    'Priority', rs.priority.value, TaskPriority.high)
            activity.created = datetime.datetime(year=2012, month=10, day=10)
            rs.activity.append(activity)
    session.flush()
    rows = self.execute_reporting_query('job-priority-changes-by-user')
    all_rows = rows.fetchall()
    user1_rows = [row for row in all_rows if row.user_name == user1.user_name]
    user2_rows = [row for row in all_rows if row.user_name == user2.user_name]
    # BUG FIX: assertTrue(x, 2) treated 2 as the failure *message* and
    # passed for any truthy x; the intent is an equality check.
    self.assertEquals(user1_rows[0].number_of_jobs_changed, 2)
    self.assertEquals(user2_rows[0].number_of_jobs_changed, 1)
def test_counts(self):
    # One system per utilisation category on a fresh lab controller; the
    # report should count exactly one system in each bucket.
    lc = data_setup.create_labcontroller()
    manual_system = data_setup.create_system(lab_controller=lc)
    data_setup.create_manual_reservation(manual_system,
            start=datetime.datetime(2012, 1, 1, 0, 0, 0))
    recipe_system = data_setup.create_system(lab_controller=lc)
    data_setup.mark_recipe_running(
        data_setup.create_job().recipesets[0].recipes[0],
        system=recipe_system)
    # Idle systems, one in each status.
    idle_manual_system = data_setup.create_system(lab_controller=lc,
            status=SystemStatus.manual)
    idle_automated_system = data_setup.create_system(lab_controller=lc,
            status=SystemStatus.automated)
    idle_broken_system = data_setup.create_system(lab_controller=lc,
            status=SystemStatus.broken)
    idle_removed_system = data_setup.create_system(lab_controller=lc,
            status=SystemStatus.removed)
    session.flush()
    counts = system_utilisation_counts(System.query.filter(
        System.lab_controller == lc))
    self.assertEqual(counts['recipe'], 1)
    self.assertEqual(counts['manual'], 1)
    self.assertEqual(counts['idle_manual'], 1)
    self.assertEqual(counts['idle_automated'], 1)
    self.assertEqual(counts['idle_broken'], 1)
    self.assertEqual(counts['idle_removed'], 1)
def basic_groups():
    """Create the three baseline groups: issuer, bidder and admin.

    Returns the mapping of group name to Group instance so callers can
    use the created objects.  (Previously the dict was built and then
    silently dropped, and each Group was constructed through a
    redundant ``Group(**dict(...))`` wrapper.)
    """
    groups = {
        'issuer': Group(group_name='issuer'),
        'bidder': Group(group_name='bidder'),
        'admin': Group(group_name='admin'),
    }
    session.flush()
    return groups
def test_recovers_running_job_with_completed_recipes(self):
    # job with two recipes, both Completed, but job is Running
    # and systems are still assigned
    job = data_setup.create_job(num_recipes=2)
    data_setup.mark_job_running(job)
    systems = [r.resource.system for r in job.all_recipes]
    # Finish both recipes directly (bypassing job-level propagation) to
    # fabricate the inconsistent state.
    job.recipesets[0].recipes[0].tasks[-1].stop()
    job.recipesets[0].recipes[0]._update_status()
    job.recipesets[0].recipes[1].tasks[-1].stop()
    job.recipesets[0].recipes[1]._update_status()
    session.flush()
    self.assertEquals(job.recipesets[0].recipes[0].status,
                      TaskStatus.completed)
    self.assertEquals(job.recipesets[0].recipes[1].status,
                      TaskStatus.completed)
    self.assertEquals(job.recipesets[0].status, TaskStatus.running)
    self.assertEquals(job.status, TaskStatus.running)
    self.assert_(systems[0].open_reservation is not None)
    self.assert_(systems[1].open_reservation is not None)
    # update_status() should repair the inconsistency: release both
    # reservations and complete the recipe set and the job.
    job.update_status()
    session.flush()
    session.expire_all()
    self.assertEquals(systems[0].open_reservation, None)
    self.assertEquals(systems[1].open_reservation, None)
    self.assertEquals(job.recipesets[0].status, TaskStatus.completed)
    self.assertEquals(job.status, TaskStatus.completed)
def test_abort_recipe_bubbles_status_to_job(self):
    # A job with two single-recipe recipe sets: aborting one recipe must
    # abort its tasks immediately, but the job only becomes aborted once
    # every recipe set has finished.
    xmljob = XmlJob(xmltramp.parse('''
        <job>
            <whiteboard>job </whiteboard>
            <recipeSet>
                <recipe>
                    <distroRequires>
                        <distro_name op="=" value="BlueShoeLinux5-5" />
                    </distroRequires>
                    <hostRequires/>
                    <task name="/distribution/install" role="STANDALONE">
                        <params/>
                    </task>
                </recipe>
            </recipeSet>
            <recipeSet>
                <recipe>
                    <distroRequires>
                        <distro_name op="=" value="BlueShoeLinux5-5" />
                    </distroRequires>
                    <hostRequires/>
                    <task name="/distribution/install" role="STANDALONE">
                        <params/>
                    </task>
                </recipe>
            </recipeSet>
        </job>
        '''))
    job = self.controller.process_xmljob(xmljob, self.user)
    session.flush()
    # Drive every recipe through the scheduling states up to Waiting.
    for recipeset in job.recipesets:
        for recipe in recipeset.recipes:
            recipe.process()
            recipe.queue()
            recipe.schedule()
            recipe.waiting()
    # Abort the first recipe.
    job.recipesets[0].recipes[0].abort()
    job.update_status()
    # Verify that it and its children are aborted.
    self.assertEquals(job.recipesets[0].recipes[0].status,
                      TaskStatus.aborted)
    for task in job.recipesets[0].recipes[0].tasks:
        self.assertEquals(task.status, TaskStatus.aborted)
    # Verify that the second recipe and its children are still waiting.
    self.assertEquals(job.recipesets[1].recipes[0].status,
                      TaskStatus.waiting)
    for task in job.recipesets[1].recipes[0].tasks:
        self.assertEquals(task.status, TaskStatus.waiting)
    # Verify that the job still shows waiting.
    self.assertEquals(job.status, TaskStatus.waiting)
    # Abort the second recipe now.
    job.recipesets[1].recipes[0].abort()
    job.update_status()
    # Verify that the whole job shows aborted now.
    self.assertEquals(job.status, TaskStatus.aborted)
def update_setting(self, *args, **kwargs):
    '''
    Updates user settings.

    @param **kwargs: contains information about anonymity, sending a
    queries by email and newsletter
    '''
    user = User.get_by(User.c.user_id==identity.current.user.user_id)
    settings = user.getSettings()
    # Each flag arrives as the string "true"/"false"; stored as 1/0.
    wants_anonymity = kwargs['anonymous'] == "true"
    settings.anonymous = 1 if wants_anonymity else 0
    # Anonymous users are displayed as "anonymous" instead of their name.
    user.display_name = "anonymous" if wants_anonymity else user.user_name
    settings.email = 1 if kwargs['email'] == "true" else 0
    settings.newsletter = 1 if kwargs['newsletter'] == "true" else 0
    session.save(user)
    session.save(settings)
    session.flush()
    return dict()
def _from_csv(cls,system,data,csv_type,log):
    """
    Import data from CSV file into system.groups

    Adds or removes (when the row's 'deleted' flag is set) a group
    membership on *system*, recording the change in the system's
    activity log.  Returns True on success, False when the group column
    is empty (an error is appended to *log*).
    """
    if 'group' in data and data['group']:
        try:
            group = Group.by_name(data['group'])
        except InvalidRequestError:
            # Group doesn't exist yet: create it on the fly.
            group = Group(group_name=data['group'],
                          display_name=data['group'])
            session.add(group)
            session.flush([group])
        deleted = False
        if 'deleted' in data:
            deleted = smart_bool(data['deleted'])
        if deleted:
            # Deleted flag set: drop the membership if present.
            if group in system.groups:
                activity = SystemActivity(identity.current.user, 'CSV', 'Removed', 'group', '%s' % group, '')
                system.activity.append(activity)
                system.groups.remove(group)
        else:
            # Otherwise ensure the membership exists.
            if group not in system.groups:
                system.groups.append(group)
                activity = SystemActivity(identity.current.user, 'CSV', 'Added', 'group', '', '%s' % group)
                system.activity.append(activity)
    else:
        log.append("%s: group can't be empty!" % system.fqdn)
        return False
    return True
def create_job_for_recipes(recipes, owner=None, whiteboard=None, cc=None,
        product=None, retention_tag=None, group=None, submitter=None,
        priority=None, **kwargs):
    """Wrap the given recipes in one recipe set and persist a Job.

    Unpredictable defaults are filled in for the caller: a 'scratch'
    retention tag, a freshly created owner, a unique whiteboard and the
    default task priority.  Returns the flushed Job.
    """
    if retention_tag is None:
        # Don't use default, unpredictable
        tag = RetentionTag.by_tag(u'scratch')
    else:
        tag = RetentionTag.by_tag(retention_tag)
    if owner is None:
        owner = create_user()
    if whiteboard is None:
        whiteboard = unique_name(u'job %s')
    total_tasks = sum(r.ttasks for r in recipes)
    job = Job(whiteboard=whiteboard, ttasks=total_tasks, owner=owner,
              retention_tag=tag, group=group, product=product,
              submitter=submitter)
    if cc is not None:
        job.cc = cc
    if priority is None:
        priority = TaskPriority.default_priority()
    recipe_set = RecipeSet(ttasks=total_tasks, priority=priority)
    recipe_set.recipes.extend(recipes)
    job.recipesets.append(recipe_set)
    session.add(job)
    session.flush()
    log.debug('Created %s', job.t_id)
    return job
def test_group_removal_is_noticed(self):
    # Removing a group that contains a system should flash a "deleted"
    # message and leave a 'Removed' entry in the System Activity log.
    self.group.systems.append(self.system)
    session.flush()
    b = self.browser
    login(b)
    # Find the group via the groups search form and remove it.
    b.get(get_server_base() + 'groups/')
    b.find_element_by_xpath("//input[@name='group.text']").clear()
    b.find_element_by_xpath("//input[@name='group.text']").send_keys(self.group.group_name)
    b.find_element_by_id('Search').submit()
    delete_and_confirm(b, "//tr[td/a[normalize-space(text())='%s']]"
            % self.group.group_name, 'Remove')
    should_have_deleted_msg = b.find_element_by_xpath('//body').text
    self.assert_('%s deleted' % self.group.display_name in should_have_deleted_msg)
    # Check it's recorded in System Activity
    b.get(get_server_base() + 'activity/system')
    b.find_element_by_link_text('Show Search Options').click()
    # Search: Action is 'Removed' AND Old Value is the group's name.
    b.find_element_by_xpath("//select[@id='activitysearch_0_table']/option[@value='Action']").click()
    b.find_element_by_xpath("//select[@id='activitysearch_0_operation']/option[@value='is']").click()
    b.find_element_by_xpath("//input[@id='activitysearch_0_value']").send_keys('Removed')
    b.find_element_by_link_text('Add').click()
    b.find_element_by_xpath("//select[@id='activitysearch_1_table']/option[@value='Old Value']").click()
    b.find_element_by_xpath("//select[@id='activitysearch_1_operation']/option[@value='is']").click()
    b.find_element_by_xpath("//input[@id='activitysearch_1_value']").send_keys(self.group.display_name)
    b.find_element_by_id('searchform').submit()
    self.assert_(is_activity_row_present(b,via='WEBUI',
            action='Removed', old_value=self.group.display_name,
            new_value='', object_='System: %s' % self.system.fqdn))
def reject(self, person_name):
    '''Reject a user's CLA.

    This method will remove a user from the CLA group and any other groups that they are in that require the CLA. It is used when a person has to fulfill some more legal requirements before having a valid CLA.

    Arguments
    :person_name: Name of the person to reject.
    '''
    # NOTE(review): 'show' is built but never used in the visible code --
    # presumably consumed by a template decorator; confirm against caller.
    show = {}
    show['show_postal_address'] = config.get('show_postal_address')
    exc = None
    user = People.by_username(turbogears.identity.current.user_name)
    if not is_admin(user):
        # Only admins can use this
        turbogears.flash(_('You are not allowed to reject CLAs.'))
        exc = 'NotAuthorized'
    else:
        # Unapprove the cla and all dependent groups
        person = People.by_username(person_name)
        for role in person.roles:
            if self._cla_dependent(role.group):
                role.role_status = 'unapproved'
        # Persist the demotions; surface DB errors to the user via flash
        # rather than letting them propagate.
        try:
            session.flush()
        except SQLError, error:
            turbogears.flash(_('Error removing cla and dependent groups' \
                    ' for %(person)s\n Error was: %(error)s') %
                    {'person': person_name, 'error': str(error)})
            exc = 'sqlalchemy.SQLError'
def _handle_historical(session, data, myth_uuid):
    """Replace the stored MythTV 'historical' statistics for *myth_uuid*.

    Deletes any existing mythtvHistorical rows for the UUID, then inserts a
    fresh row built from ``data['features']['historical']``, defaulting each
    missing counter to 0.

    :param session: DB session used for the delete/add/flush.
    :param data: decoded smolt/mythtv payload (nested dicts).
    :param myth_uuid: unique id of the reporting frontend.
    """
    # Tolerate missing or malformed payloads the same way the old bare
    # excepts did: fall back to an empty dict of stats.
    try:
        myth_hist = data['features']['historical']
    except (KeyError, TypeError):
        myth_hist = {}
    if not isinstance(myth_hist, dict):
        myth_hist = {}

    # Each counter defaults to 0 when absent from the payload.
    showcount = myth_hist.get('showcount', 0)
    rectime = myth_hist.get('rectime', 0)
    db_age = myth_hist.get('db_age', 0)
    reccount = myth_hist.get('reccount', 0)

    # Remove any previous snapshot for this frontend before inserting.
    session.query(mythtvHistorical).filter_by(myth_uuid=myth_uuid).delete()
    session.add(mythtvHistorical(myth_uuid, showcount, rectime,
            db_age, reccount))
    session.flush()
def _handle_pbp(session, data, machine_id):
    """Replace the stored MythTV playback-profile info for *machine_id*.

    Deletes any existing mythtvPbp row for the machine, then inserts a new
    one from ``data['features']['playbackprofile']``, falling back to
    'unknown' values when the payload is missing or malformed.

    :param session: DB session used for the delete/add/flush.
    :param data: decoded smolt/mythtv payload (nested dicts).
    :param machine_id: id of the reporting machine.
    """
    # Tolerate missing or malformed payloads the same way the old bare
    # excepts did: fall back to 'unknown' values.
    try:
        myth_pb = data['features']['playbackprofile']
    except (KeyError, TypeError):
        myth_pb = {}
    if not isinstance(myth_pb, dict):
        myth_pb = {}

    name = myth_pb.get('name', "unknown")
    profiles = myth_pb.get('profiles', ['unknown'])

    # Remove old entry
    session.query(mythtvPbp).filter_by(machine_id=machine_id).delete()
    # Add new entry
    session.add(mythtvPbp(machine_id, name, profiles))
    session.flush()
def test_recovers_running_job_with_completed_recipes(self):
    # job with two recipes, both Completed, but job is Running
    # and systems are still assigned
    job = data_setup.create_job(num_recipes=2)
    data_setup.mark_job_running(job)
    systems = [r.resource.system for r in job.all_recipes]
    # Finish the last task of each recipe and recompute only the recipe
    # status, deliberately NOT propagating up to the recipe set / job.
    job.recipesets[0].recipes[0].tasks[-1].stop()
    job.recipesets[0].recipes[0]._update_status()
    job.recipesets[0].recipes[1].tasks[-1].stop()
    job.recipesets[0].recipes[1]._update_status()
    session.flush()
    # Confirm the inconsistent state we are testing recovery from:
    # recipes Completed, recipe set and job still Running, both
    # system reservations still open.
    self.assertEquals(job.recipesets[0].recipes[0].status, TaskStatus.completed)
    self.assertEquals(job.recipesets[0].recipes[1].status, TaskStatus.completed)
    self.assertEquals(job.recipesets[0].status, TaskStatus.running)
    self.assertEquals(job.status, TaskStatus.running)
    self.assert_(systems[0].open_reservation is not None)
    self.assert_(systems[1].open_reservation is not None)
    job._mark_dirty() # in reality, we did this by hand
    job.update_status()
    session.flush()
    session.expire_all()
    # update_status() must release both systems and complete the job.
    self.assertEquals(systems[0].open_reservation, None)
    self.assertEquals(systems[1].open_reservation, None)
    self.assertEquals(job.recipesets[0].status, TaskStatus.completed)
    self.assertEquals(job.status, TaskStatus.completed)
def setUp(self):
    """Open a DB session and create per-test fixture data."""
    session.begin()
    # Imported here rather than at module level; presumably to defer
    # import-time side effects of the jobs controller -- confirm.
    from bkr.server.jobs import Jobs
    self.controller = Jobs()
    self.user = data_setup.create_user()
    data_setup.create_distro_tree(distro_name=u'BlueShoeLinux5-5')
    session.flush()
def by_user_name(cls, user_name):
    """Return the User with the given user_name, or None.

    Looks in the local DB first.  If no local row exists and LDAP
    auto-creation is enabled, searches LDAP for an exactly-matching uid
    and creates a local User row from the LDAP attributes.

    :param user_name: username to look up.
    :return: User instance, or None when no (unambiguous) match exists.
    """
    # Try to look up the user via local DB first.
    user = cls.query.filter_by(user_name=user_name).first()
    if user is not None:
        return user
    # If user doesn't exist in DB check ldap if enabled.
    ldapenabled = get('identity.ldap.enabled', False)
    autocreate = get('identity.soldapprovider.autocreate', False)
    # Presence of '/' indicates a Kerberos service principal, which is
    # never auto-created from LDAP.
    if not ldapenabled or not autocreate or '/' in user_name:
        return None
    # Renamed from 'filter' to avoid shadowing the builtin.
    ldap_filter = ldap.filter.filter_format('(uid=%s)',
            [user_name.encode('utf8')])
    ldapcon = ldap.initialize(get('identity.soldapprovider.uri'))
    objects = ldapcon.search_st(get('identity.soldapprovider.basedn', ''),
            ldap.SCOPE_SUBTREE, ldap_filter,
            timeout=get('identity.soldapprovider.timeout', 20))
    # We need exactly one match: none means unknown user, more than one
    # is ambiguous and we refuse to guess.
    if len(objects) != 1:
        return None
    attrs = objects[0][1]
    # LDAP normalization rules means that we might have found a user
    # who doesn't actually match the username we were given.
    if attrs['uid'][0].decode('utf8') != user_name:
        return None
    user = User()
    user.user_name = attrs['uid'][0].decode('utf8')
    user.display_name = attrs['cn'][0].decode('utf8')
    user.email_address = attrs['mail'][0].decode('utf8')
    session.add(user)
    session.flush()
    return user
def test_machine_utilization(self):
    #
    # Note: this test relies on hard-coded time stamps in 2002 and on
    # reservations with no finish time.
    #
    system1 = data_setup.create_system()
    system2 = data_setup.create_system()
    system3 = data_setup.create_system()
    # system1: reserved for 1 day out of the 30 days of June 2002
    # -> expected utilization 1/30 = 0.0333.
    data_setup.create_manual_reservation(
        system=system1,
        start=datetime.datetime(2002, 6, 2, 22, 30, 0),
        finish=datetime.datetime(2002, 6, 3, 22, 30, 0))
    # system2: reservation never finished -> expected utilization 1.0000.
    data_setup.create_manual_reservation(system=system2,
                                         start=datetime.datetime(
                                             2002, 5, 2, 22, 30, 0))
    # system3: reservation entirely in May -- presumably outside the
    # window the reporting query covers (confirm against the SQL script),
    # so it must not appear in the results.
    data_setup.create_manual_reservation(
        system=system3,
        start=datetime.datetime(2002, 5, 2, 22, 30, 0),
        finish=datetime.datetime(2002, 5, 3, 22, 30, 0))
    session.flush()
    rows = [
        row for row in self.execute_reporting_query('machine-utilization')
    ]
    self.assertEquals(len(rows), 2, rows)
    self.assertIn((system1.fqdn, Decimal('0.0333')), rows)
    self.assertIn((system2.fqdn, Decimal('1.0000')), rows)
    # system3 should not appear
    self.assertNotIn(system3.fqdn, [row[0] for row in rows])
def mark_recipe_complete(recipe, result=TaskResult.pass_,
        task_status=TaskStatus.completed, start_time=None, finish_time=None,
        only=False, server_log=False, **kwargs):
    """Drive *recipe* all the way to a finished state for tests.

    :param result: result recorded for each task (default pass_).
    :param task_status: final status recorded for each task.
    :param only: if true, skip the earlier lifecycle stages.
    :param server_log: store logs pointing at a dummy archive server.
    """
    mark_recipe_tasks_finished(recipe, result=result,
            task_status=task_status, start_time=start_time,
            finish_time=finish_time, only=only, server_log=server_log,
            **kwargs)
    recipe.recipeset.job.update_status()
    if finish_time:
        # Override whatever finish time update_status() computed.
        recipe.finish_time = finish_time
    if recipe.reservation_request:
        # Expire the reservation immediately, then recompute status again
        # so the recipe does not stay in Reserved.
        recipe.extend(0)
        recipe.recipeset.job.update_status()
    if isinstance(recipe.resource, VirtResource):
        recipe.resource.instance_deleted = datetime.datetime.utcnow()
    if hasattr(recipe.resource, 'system'):
        # Similar to the hack in mark_recipe_waiting, we do not want beaker-provision
        # to try and run the power commands that were just enqueued.
        session.flush()
        for cmd in recipe.resource.system.command_queue:
            if cmd.status == CommandStatus.queued:
                cmd.change_status(CommandStatus.running)
                cmd.change_status(CommandStatus.completed)
    log.debug('Marked %s as complete with result %s', recipe.t_id, result)
def mark_recipe_waiting(recipe, start_time=None, only=False, **kwargs):
    """Drive *recipe* into the Waiting state for tests.

    :param start_time: recipe start time; defaults to utcnow.
    :param only: if true, skip marking the recipe scheduled first.
    """
    if start_time is None:
        start_time = datetime.datetime.utcnow()
    if not only:
        mark_recipe_scheduled(recipe, start_time=start_time, **kwargs)
    recipe.start_time = start_time
    # Provision without talking to a real virt manager.
    with mock.patch('bkr.server.dynamic_virt.VirtManager', autospec=True):
        recipe.provision()
    if recipe.installation.commands:
        # Because we run a real beaker-provision in the dogfood tests, it will pick
        # up the freshly created configure_netboot commands and try pulling down
        # non-existent kernel+initrd images. When that fails, it will abort our
        # newly created recipe which we don't want. To work around this, we hack
        # the commands to be already completed so that beaker-provision skips them.
        # I would like to have a better solution here...
        session.flush()
        for cmd in recipe.installation.commands:
            cmd.change_status(CommandStatus.running)
            cmd.change_status(CommandStatus.completed)
    else:
        # System without power control, there are no power commands. In the
        # real world the recipe sits in Waiting with no watchdog kill time
        # until someone powers on the system.
        pass
    recipe.waiting()
    recipe.recipeset.job.update_status()
    log.debug('Provisioned %s', recipe.t_id)
def add_ssh_public_key(username):
    """
    Adds a new SSH public key for the given user account.

    Accepts mimetype:`text/plain` request bodies containing the SSH public
    key in the conventional OpenSSH format: <keytype> <key> <ident>.

    :param username: The user's username.
    """
    user = _get_user(username)
    # Only the user themselves (or someone with edit rights) may add keys.
    if not user.can_edit(identity.current.user):
        raise Forbidden403('Cannot edit user %s' % user)
    if request.mimetype != 'text/plain':
        raise UnsupportedMediaType415(
            'Request content type must be text/plain')
    with convert_internal_errors():
        key_text = request.data.strip()
        if '\n' in key_text:
            raise ValueError('SSH public keys may not contain newlines')
        # Split into at most three fields: keytype, key material, ident.
        parts = key_text.split(None, 2)
        if len(parts) != 3:
            raise ValueError('Invalid SSH public key')
        keytype, keyval, ident = parts
        key = SSHPubKey(keytype, keyval, ident)
        user.sshpubkeys.append(key)
        session.flush()  # to populate id
    return jsonify(key.__json__())
def reject(self, person_name):
    '''Reject a user's FPCA.

    This method will remove a user from the FPCA group and any other groups
    that they are in that require the FPCA.  It is used when a person has
    to fulfill some more legal requirements before having a valid FPCA.

    Arguments
    :person_name: Name of the person to reject.
    '''
    show = {}
    show['show_postal_address'] = config.get('show_postal_address')
    exc = None
    user = People.by_username(turbogears.identity.current.user_name)
    if not is_admin(user):
        # Only admins can use this
        turbogears.flash(_('You are not allowed to reject FPCAs.'))
        exc = 'NotAuthorized'
    else:
        # Unapprove the cla and all dependent groups
        person = People.by_username(person_name)
        for role in person.roles:
            if self._cla_dependent(role.group):
                role.role_status = 'unapproved'
        try:
            session.flush()
        except SQLError, error:
            turbogears.flash(_('Error removing cla and dependent groups' \
                ' for %(person)s\n Error was: %(error)s') %
                {'person': person_name, 'error': str(error)})
            exc = 'sqlalchemy.SQLError'
    # NOTE(review): exc and show are built here but nothing visible returns
    # them -- presumably an @expose/template decorator consumes the method's
    # result further down; confirm against the full method body.
def update_reservation_request(id):
    """
    Updates the reservation request of a recipe. The request must be
    :mimetype:`application/json`.

    :param id: Recipe's id.
    :jsonparam boolean reserve: Whether the system will be reserved at the end
      of the recipe. If true, the system will be reserved. If false, the
      system will not be reserved.
    :jsonparam int duration: Number of seconds to reserve the system.
    :jsonparam string when: Circumstances under which the system will be
      reserved. Valid values are:

      onabort
        If the recipe status is Aborted.
      onfail
        If the recipe status is Aborted, or the result is Fail.
      onwarn
        If the recipe status is Aborted, or the result is Fail or Warn.
      always
        Unconditionally.
    """
    recipe = _get_recipe_by_id(id)
    if not recipe.can_update_reservation_request(identity.current.user):
        raise Forbidden403(
            'Cannot update the reservation request of recipe %s' % recipe.id)
    data = read_json_request(request)
    if 'reserve' not in data:
        raise BadRequest400('No reserve specified')
    with convert_internal_errors():
        if data['reserve']:
            # Create the request lazily on first reserve=true update.
            if not recipe.reservation_request:
                recipe.reservation_request = RecipeReservationRequest()
            if 'duration' in data:
                duration = int(data['duration'])
                # NOTE(review): compared against MAX_SECONDS_PROVISION but
                # the message reports MAX_HOURS_PROVISION -- presumably the
                # same limit in different units; confirm.
                if duration > MAX_SECONDS_PROVISION:
                    raise BadRequest400(
                        'Reservation time exceeds maximum time of %s hours' %
                        MAX_HOURS_PROVISION)
                old_duration = recipe.reservation_request.duration
                recipe.reservation_request.duration = duration
                _record_activity(recipe, u'Reservation Request', old_duration,
                                 duration)
            if 'when' in data:
                old_condition = recipe.reservation_request.when
                new_condition = RecipeReservationCondition.from_string(
                    data['when'])
                recipe.reservation_request.when = new_condition
                _record_activity(recipe, u'Reservation Condition',
                                 old_condition, new_condition)
            session.flush()  # to ensure the id is populated
            return jsonify(recipe.reservation_request.__json__())
        else:
            # reserve=false: drop any existing request and record it.
            if recipe.reservation_request:
                session.delete(recipe.reservation_request)
                _record_activity(recipe, u'Reservation Request',
                                 recipe.reservation_request.duration, None)
            return jsonify(RecipeReservationRequest.empty_json())
def setUp(cls):
    """Build fixture data: two labs, one distro tree available in each lab,
    two systems (both in lab1), and two jobs each requiring one of the
    distro trees."""
    # Create two unique labs.  Distinct suffixes guarantee the FQDNs can
    # never collide, even when both calls land within the same millisecond
    # (timestamp-only names could clash).
    stamp = int(time.time() * 1000)
    lab1 = data_setup.create_labcontroller(fqdn=u'lab1_%d' % stamp)
    lab2 = data_setup.create_labcontroller(fqdn=u'lab2_%d' % stamp)
    # Create two distros and only put one in each lab.
    cls.distro_tree1 = data_setup.create_distro_tree()
    cls.distro_tree2 = data_setup.create_distro_tree()
    session.flush()
    cls.distro_tree1.lab_controller_assocs = [
        LabControllerDistroTree(lab_controller=lab2, url=u'http://notimportant')
    ]
    cls.distro_tree2.lab_controller_assocs = [
        LabControllerDistroTree(lab_controller=lab1, url=u'http://notimportant')
    ]
    # Create a user
    user = data_setup.create_user()
    # Create two systems but only put them in lab1.
    system1 = data_setup.create_system(owner=user)
    system2 = data_setup.create_system(owner=user)
    system1.lab_controller = lab1
    system2.lab_controller = lab1
    session.flush()
    # Create two jobs, one requiring distro_tree1 and one requiring distro_tree2
    job = '''
        <job>
            <whiteboard>%s</whiteboard>
            <recipeSet>
                <recipe>
                    <distroRequires>
                        <distro_name op="=" value="%s" />
                    </distroRequires>
                    <hostRequires/>
                    <task name="/distribution/install" role="STANDALONE">
                        <params/>
                    </task>
                </recipe>
            </recipeSet>
        </job>
        '''
    xmljob1 = XmlJob(
        xmltramp.parse(
            job % (cls.distro_tree1.distro.name, cls.distro_tree1.distro.name)))
    xmljob2 = XmlJob(
        xmltramp.parse(
            job % (cls.distro_tree2.distro.name, cls.distro_tree2.distro.name)))
    cls.job1 = Jobs().process_xmljob(xmljob1, user)
    cls.job2 = Jobs().process_xmljob(xmljob2, user)
def logout(self): """Remove the link between this identity and the visit.""" visit = self.visit_link if visit: session.delete(visit) session.flush() # Clear the current identity identity.set_current_identity(SqlAlchemyIdentity())
def logout(self):
    '''Detach this identity from its visit and install an anonymous one.'''
    current_link = self.visit_link
    if current_link:
        # Remove the persisted visit so the session token is forgotten.
        session.delete(current_link)
        session.flush()
    # Hand the framework a fresh, unauthenticated identity.
    identity.set_current_identity(SaFasIdentity())
def test_job_completion_notification_off(self):
    # An owner who opted out of completion notifications must receive
    # no mail when their job finishes.
    with session.begin():
        job_owner = data_setup.create_user(notify_job_completion=False)
        job = data_setup.create_job(owner=job_owner)
        session.flush()
        data_setup.mark_job_complete(job)
    self.assertEqual(len(self.mail_capture.captured_mails), 0)
def new_visit_with_key(self, visit_key):
    '''Create and persist a fresh visit record for *visit_key*.'''
    now = datetime.now()
    new_visit = visit_class()
    new_visit.visit_key = visit_key
    new_visit.created = now
    # Expiry is measured from the creation instant.
    new_visit.expiry = now + self.timeout
    session.flush()
    return Visit(visit_key, True)
def mark_recipe_tasks_finished(recipe, result=TaskResult.pass_,
        task_status=TaskStatus.completed, finish_time=None, only=False,
        server_log=False, num_tasks=None, **kwargs):
    """Finish (some of) *recipe*'s tasks with the given status and result.

    :param result: result recorded per task; None means record no results.
    :param task_status: final status applied to each task.
    :param only: if true, skip marking the recipe running/installed first.
    :param server_log: attach logs pointing at a dummy archive server
        instead of the recipe set's lab controller.
    :param num_tasks: finish only the first num_tasks tasks (all if None).
    """
    # we accept result=None to mean: don't add any results to recipetasks
    assert result is None or result in TaskResult
    finish_time = finish_time or datetime.datetime.utcnow()
    if not only:
        mark_recipe_running(recipe, **kwargs)
        mark_recipe_installation_finished(recipe)
    # Need to make sure recipe.watchdog has been persisted, since we delete it
    # below when the recipe completes and sqlalchemy will barf on deleting an
    # instance that hasn't been persisted.
    session.flush()
    # Recipe-level log: local lab controller vs dummy archive server.
    if not server_log:
        recipe.log_server = recipe.recipeset.lab_controller.fqdn
        recipe.logs = [LogRecipe(path=u'recipe_path', filename=u'dummy.txt')]
    else:
        recipe.log_server = u'dummy-archive-server'
        recipe.logs = [
            LogRecipe(server=u'http://dummy-archive-server/beaker/',
                      path=u'recipe_path',
                      filename=u'dummy.txt')
        ]
    # Factory for per-task log rows.
    if not server_log:
        rt_log = lambda: LogRecipeTask(path=u'tasks', filename=u'dummy.txt')
    else:
        rt_log = lambda: LogRecipeTask(server=
                                       u'http://dummy-archive-server/beaker/',
                                       path=u'tasks',
                                       filename=u'dummy.txt')
    # Factory for per-result log rows.
    if not server_log:
        rtr_log = lambda: LogRecipeTaskResult(path=u'/', filename=u'result.txt')
    else:
        rtr_log = lambda: LogRecipeTaskResult(
            server=u'http://dummy-archive-server/beaker/',
            path=u'/',
            filename=u'result.txt')
    # Finish the first num_tasks tasks (all of them when num_tasks is None).
    for recipe_task in recipe.tasks[:num_tasks]:
        if result is not None:
            rtr = RecipeTaskResult(recipetask=recipe_task, result=result)
            rtr.logs = [rtr_log()]
            recipe_task.results.append(rtr)
        recipe_task.logs = [rt_log()]
        recipe_task.finish_time = finish_time
        recipe_task._change_status(task_status)
    log.debug('Marked %s tasks in %s as %s with result %s',
              num_tasks or 'all', recipe.t_id, task_status, result)