def new_bin(self, batch):
    bin = Bin()
    bin.marker_id = self.id
    bin.batch_id = batch.id
    object_session(self).add(bin)
    return bin

def copy(self, hda, history=None, hide_copy=False, flush=True, **kwargs):
    """
    Copy hda, including annotation and tags; add it to history and
    return the new HDA.
    """
    copy = hda.copy(parent_id=kwargs.get('parent_id'), copy_hid=False,
                    copy_tags=hda.tags, flush=flush)
    if hide_copy:
        copy.visible = False
    if history:
        history.stage_addition(copy)

    copy.set_size()

    original_annotation = self.annotation(hda)
    self.annotate(copy, original_annotation, user=hda.history.user,
                  flush=False)
    if flush:
        if history:
            history.add_pending_items()
        object_session(copy).flush()
    return copy

def get_token_for_calendars_restrict_ids(self, account, client_ids,
                                         force_refresh=False):
    '''
    For the given account, returns an access token that's associated
    with a client id from the given list of client_ids.
    '''
    scope = GOOGLE_CALENDAR_SCOPE
    if not force_refresh:
        try:
            gtoken = self._tokens[account.id][scope]
            if gtoken.client_id in client_ids:
                return gtoken.value
        except KeyError:
            pass

    # Need to get an access token for a specific client_id/client_secret pair
    try:
        gtoken = account.new_token(scope, client_ids=client_ids)
    except (ConnectionError, OAuthError):
        object_session(account).commit()
        raise
    self.cache_token(account, gtoken)
    return gtoken.value

def new_channel(self, raw_data, data, dye, wavelen, status, median, mean,
                max_height, min_height, std_dev,
                initial_marker=None, initial_panel=None):
    """Create a new channel and add it to this assay."""
    if not initial_marker:
        initial_marker = Marker.search('undefined',
                                       session=object_session(self))
    if not initial_panel:
        initial_panel = Panel.search('undefined',
                                     session=object_session(self))
    channel = Channel(raw_data=raw_data, data=data, dye=dye,
                      wavelen=wavelen, status=status, median=median,
                      mean=mean, max_height=max_height,
                      min_height=min_height, std_dev=std_dev)
    channel.assay = self
    channel.marker = initial_marker
    channel.panel = initial_panel
    return channel

def _get_token(self, account, scope, force_refresh=False):
    if not force_refresh:
        try:
            gtoken = self._tokens[account.id][scope]
            if datetime.utcnow() < gtoken.expiration:
                return gtoken
        except KeyError:
            pass

    # If we find invalid GmailAuthCredentials while trying to get a new
    # token, we mark them as invalid. We want to make sure we commit
    # those changes to the database before we actually throw an error.
    try:
        gtoken = account.new_token(scope)
    except (ConnectionError, OAuthError):
        if object_session(account):
            object_session(account).commit()
        else:
            with session_scope(account.id) as db_session:
                db_session.merge(account)
                db_session.commit()
        raise
    self.cache_token(account, gtoken)
    return gtoken

def add_statement(self, phase, time, value):
    statement = Statement()
    statement.user = self
    statement.phase = phase
    statement.time = time
    statement.value = value
    object_session(self).add(statement)

def _delete(self):
    """Remove image scale from database and filesystem."""
    try:
        delete_file(self.filesystem_path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
    object_session(self).delete(self)

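# The errno.ENOENT guard above makes file deletion idempotent: a missing
# file is tolerated while any other OSError still propagates. A minimal
# standalone sketch of the same pattern in modern Python, where
# FileNotFoundError subsumes the errno check (remove_if_exists is an
# illustrative name, not part of the codebase above):
import os

def remove_if_exists(path):
    try:
        os.remove(path)
    except FileNotFoundError:  # equivalent to OSError with errno.ENOENT
        pass
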
def active_ballot_events(self):
    # this needs to be guarded in the function, as it's circular
    from . import BallotEvent
    if not object_session(self):
        return {}
    all_events = object_session(self).query(BallotEvent).filter(
        BallotEvent.is_active).all()
    return {e: self.slot_for.get(e) for e in all_events}

def create_thumbnail(self, size=(500, 333)):
    _, thumbnail_filename = create_thumbnail(self.fullpath, size)
    folder, filename = os.path.split(thumbnail_filename)
    thumbnail = Thumbnail(image_id=self.id, width=size[0], height=size[1],
                          folder=folder, filename=filename)
    object_session(self).add(thumbnail)
    object_session(self).commit()

def delete(self):
    """Delete this image including all scales from the database and disk."""
    self._delete_scales()
    try:
        delete_file(self.filesystem_path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
    object_session(self).delete(self)

def update_stats_descendants(self):
    for qnode in self.hierarchy.qnodes:
        rnode = qnode.get_rnode(self)
        if rnode is None:
            rnode = ResponseNode(
                survey_id=self.survey_id,
                assessment_id=self.id,
                qnode_id=qnode.id)
            object_session(self).add(rnode)
            object_session(self).flush()
        rnode.update_stats_descendants()

def update_stats_ancestors(self):
    self.update_stats()
    parent = self.parent
    if parent is None:
        qnode = self.parent_qnode
        if qnode is None:
            return
        parent = ResponseNode(
            survey=self.survey, assessment=self.assessment, qnode=qnode)
        object_session(self).add(parent)
        object_session(self).flush()
    parent.update_stats_ancestors()

def __init__(self, model=None, parent=None, branch_mode=False):
    '''
    :param model: Plant instance or None
    :param parent: None
    :param branch_mode:
    '''
    if branch_mode:
        if model is None:
            raise CheckConditionError(_("branch_mode requires a model"))
        elif object_session(model) and model in object_session(model).new:
            raise CheckConditionError(_("cannot branch a new plant"))

    # TODO: shouldn't allow branching plants with quantity < 2
    # TODO: shouldn't allow changing the accession code in branch_mode

    if model is None:
        model = Plant()

    self.branched_plant = None
    if branch_mode:
        # duplicate the model so we can branch from it without
        # destroying the first
        self.branched_plant = model
        model = self.branched_plant.duplicate(code=None)

    super(PlantEditor, self).__init__(model, parent)

    if self.branched_plant:
        # make a copy of the branched plant for this session
        self.branched_plant = self.session.merge(self.branched_plant)

    if not parent and bauble.gui:
        parent = bauble.gui.window
    self.parent = parent
    self._committed = []

    view = PlantEditorView(parent=self.parent)
    self.presenter = PlantEditorPresenter(self.model, view)

    # add quick response keys
    self.attach_response(view.get_window(), gtk.RESPONSE_OK, 'Return',
                         gtk.gdk.CONTROL_MASK)
    self.attach_response(view.get_window(), self.RESPONSE_NEXT, 'n',
                         gtk.gdk.CONTROL_MASK)

    # set default focus
    if self.model.accession is None:
        view.widgets.plant_acc_entry.grab_focus()
    else:
        view.widgets.plant_code_entry.grab_focus()

def update_stats_descendants(self):
    for qchild in self.qnode.children:
        rchild = qchild.get_rnode(self)
        if rchild is None:
            rchild = ResponseNode(
                survey_id=self.survey_id,
                assessment_id=self.assessment_id,
                qnode_id=qchild.id)
            object_session(self).add(rchild)
            object_session(self).flush()
        rchild.update_stats_descendants()
    for response in self.responses:
        response.update_stats()
    self.update_stats()

def save(self, commit=True):
    """Save the record."""
    db = object_session(self)
    db.add(self)
    if commit:
        db.commit()
    return self

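# A runnable sketch exercising the save() helper above on a toy
# declarative model (Note, the engine setup, and the session are
# illustrative assumptions). Because save() resolves its session via
# object_session(), the instance must already be attached to a session
# before calling it:
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base, object_session

Base = declarative_base()

class Note(Base):
    __tablename__ = 'notes'
    id = Column(Integer, primary_key=True)
    text = Column(String)

    def save(self, commit=True):
        """Save the record (same helper as above)."""
        db = object_session(self)
        db.add(self)
        if commit:
            db.commit()
        return self

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    note = Note(text='draft')
    session.add(note)        # attach first; object_session() is None otherwise
    note.text = 'final'
    note.save()              # flushes and commits in one call
    note.save(commit=False)  # stage only; the caller owns the transaction
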
def __init__(self, model, view):
    '''
    :param model: should be an instance of class Family
    :param view: should be an instance of FamilyEditorView
    '''
    super(FamilyEditorPresenter, self).__init__(model, view)
    self.session = object_session(model)

    # initialize widgets
    self.init_enum_combo('fam_qualifier_combo', 'qualifier')
    self.synonyms_presenter = SynonymsPresenter(self)
    self.refresh_view()  # put model values in view

    # connect signals
    self.assign_simple_handler('fam_family_entry', 'family',
                               editor.UnicodeOrNoneValidator())
    self.assign_simple_handler('fam_qualifier_combo', 'qualifier',
                               editor.UnicodeOrEmptyValidator())
    notes_parent = self.view.widgets.notes_parent_box
    notes_parent.foreach(notes_parent.remove)
    self.notes_presenter = \
        editor.NotesPresenter(self, 'notes', notes_parent)

    if self.model not in self.session.new:
        self.view.widgets.fam_ok_and_add_button.set_sensitive(True)

    # for each widget register a signal handler to be notified when the
    # value in the widget changes, that way we can do things like
    # sensitize the ok button
    self._dirty = False

def onEnter(self, dbcluster):
    dbdecommissioned = HostLifecycle.get_unique(object_session(dbcluster),
                                                "decommissioned",
                                                compel=True)

    config = Config()
    archetype = dbcluster.personality.archetype
    section = "archetype_" + archetype.name
    opt = "allow_cascaded_deco"

    if dbcluster.hosts and (not config.has_option(section, opt) or
                            not config.getboolean(section, opt)):
        raise ArgumentError("Cannot change state to {0}, as {1}'s "
                            "archetype is {2}.".format(
                                dbdecommissioned.name, dbcluster,
                                archetype.name))

    if dbcluster.machines:
        raise ArgumentError("Cannot change state to {0}, as {1} has "
                            "{2} VM(s).".format(dbdecommissioned.name,
                                                dbcluster,
                                                len(dbcluster.machines)))

    for dbhost in dbcluster.hosts:
        dbhost.status.transition(dbhost, dbdecommissioned)

def pages(self):
    """Return all translations that have this file in its relation."""
    session = object_session(self)
    attr = getattr(PageInfo, "{}s".format(self.__class__.__name__.lower()))
    return session.query(PageInfo).filter(
        attr.any(self.__class__.id == self.id)
    ).all()

def __init__(self, model, view):
    '''
    model: should be an instance of class Accession
    view: should be an instance of AccessionEditorView
    '''
    GenericEditorPresenter.__init__(self, model, view)
    self.session = object_session(model)
    self._dirty = False

    # initialize widgets
    self.refresh_view()  # put model values in view

    # connect signals
    self.assign_simple_handler('loc_name_entry', 'name',
                               UnicodeOrNoneValidator())
    self.assign_simple_handler('loc_code_entry', 'code',
                               UnicodeOrNoneValidator())
    self.assign_simple_handler('loc_desc_textview', 'description',
                               UnicodeOrNoneValidator())
    self.refresh_sensitivity()

    if self.model not in self.session.new:
        self.view.widgets.loc_ok_and_add_button.set_sensitive(True)

    # the merger danger zone
    self.merger_candidate = None

    def on_location_select(location):
        logger.debug('merger candidate: %s' % location)
        self.merger_candidate = location

    from bauble.plugins.garden import init_location_comboentry
    init_location_comboentry(
        self, self.view.widgets.loc_merge_comboentry, on_location_select)
    self.view.connect('loc_merge_button', 'clicked',
                      self.on_loc_merge_button_clicked)

def __init__(self, model, parent=None):
    '''
    :param prop_parent: an instance with a propagation relation
    :param model: Propagation instance
    :param parent: the parent widget
    '''
    # the view and presenter are created in self.start()
    self.view = None
    self.presenter = None
    super(PropagationEditor, self).__init__(model, parent)

    # if the model already has a session then use it; this is unique to
    # the PropagationEditor because so far it is the only editor that is
    # dependent on a parent editor and the parent editor's model and
    # session
    sess = object_session(model)
    if sess:
        self.session.close()
        self.session = sess
        self.model = model

    if not parent and bauble.gui:
        parent = bauble.gui.window
    self.parent = parent

    view = PropagationEditorView(parent=self.parent)
    self.presenter = PropagationEditorPresenter(self.model, view)

    # add quick response keys
    self.attach_response(view.get_window(), gtk.RESPONSE_OK, 'Return',
                         gtk.gdk.CONTROL_MASK)
    self.attach_response(view.get_window(), self.RESPONSE_OK_AND_ADD, 'k',
                         gtk.gdk.CONTROL_MASK)
    self.attach_response(view.get_window(), self.RESPONSE_NEXT, 'n',
                         gtk.gdk.CONTROL_MASK)

def get_bind(obj):
    """
    Return the bind for given SQLAlchemy Engine / Connection /
    declarative model object.

    :param obj: SQLAlchemy Engine / Connection / declarative model object

    ::

        from sqlalchemy_utils import get_bind

        get_bind(session)  # Connection object

        get_bind(user)
    """
    if hasattr(obj, 'bind'):
        conn = obj.bind
    else:
        try:
            conn = object_session(obj).bind
        except UnmappedInstanceError:
            conn = obj

    if not hasattr(conn, 'execute'):
        raise TypeError(
            'This method accepts only Session, Engine, Connection and '
            'declarative model objects.'
        )
    return conn

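# A quick sanity check of the get_bind() above against an in-memory
# SQLite engine. Base/User are illustrative toy definitions, and this
# assumes SQLAlchemy 1.4, where Engine still exposes .execute (which the
# hasattr check above relies on):
from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    user = User()
    session.add(user)
    session.flush()
    # the session, and an instance attached to it, resolve to the same bind
    assert get_bind(session) is engine
    assert get_bind(user) is engine
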
def set_job_status(job, status=JobStatus.new):
    session = object_session(job)
    if session is None:
        session = get_session()
    job.status = status
    session.add(job)
    session.commit()

def accepted(self):
    'Name that should be used if name of self should be rejected'
    session = object_session(self)
    syn = session.query(GenusSynonym).filter(
        GenusSynonym.synonym_id == self.id).first()
    accepted = syn and syn.genus
    return accepted

def remove_callback(values):
    """
    The callback function to remove a species from the species context menu.
    """
    from bauble.plugins.garden.accession import Accession
    species = values[0]
    session = object_session(species)
    if isinstance(species, VernacularName):
        species = species.species
    nacc = session.query(Accession).filter_by(species_id=species.id).count()
    safe_str = utils.xml_safe(str(species))
    if nacc > 0:
        msg = _('The species <i>%(species)s</i> has %(num_accessions)s '
                'accessions. Are you sure you want to remove it?') \
            % dict(species=safe_str, num_accessions=nacc)
    else:
        msg = _("Are you sure you want to remove the species <i>%s</i>?") \
            % safe_str
    if not utils.yes_no_dialog(msg):
        return
    try:
        obj = session.query(Species).get(species.id)
        session.delete(obj)
        session.commit()
    except Exception, e:
        msg = _('Could not delete.\n\n%s') % utils.xml_safe(e)
        utils.message_details_dialog(msg, traceback.format_exc(),
                                     type=gtk.MESSAGE_ERROR)

def _from_one_to_many_pairs(self, pairs, key, clazz):
    '''Simple saving of one-to-many relations - not usually associated
    with from_pairs'''
    added = []
    updated = []
    deleted = []
    errors = []
    session = object_session(self)
    for item in pairs.get(key, []):
        item_id = item.get('id')
        obj = None
        if item_id:
            if int(item_id) < 0:
                # a negative id marks an existing object for deletion
                obj = session.query(clazz).get(abs(int(item_id)))
                session.delete(obj)
                deleted.append(obj)
                continue
            else:
                obj = session.query(clazz).get(item_id)
                if obj.from_pairs(item) is True:
                    e = obj._validate_()
                    if e:
                        errors.extend(e)
                    updated.append(obj)
        else:
            obj = clazz()
            getattr(self, key).append(obj)
            if obj.from_pairs(item) is True:
                e = obj._validate_()
                if e:
                    errors.extend(e)
                added.append(obj)
    return added, updated, deleted, errors

def new_channel(self, raw_data, data, dye, wavelen, status, median, mean,
                max_height, min_height, std_dev,
                initial_marker=None, initial_panel=None):
    """Create a new channel and add it to this FSA."""
    if not initial_marker:
        initial_marker = Marker.search('undefined',
                                       session=object_session(self))
    if not initial_panel:
        initial_panel = Panel.search('undefined',
                                     session=object_session(self))
    channel = Channel(raw_data=raw_data, data=data, dye=dye,
                      wavelen=wavelen, status=status, median=median,
                      mean=mean, max_height=max_height,
                      min_height=min_height, std_dev=std_dev)
    channel.fsa = self
    channel.marker = initial_marker
    channel.panel = initial_panel
    return channel

def monitorcpe(e):
    tenant = e.tenant
    job = JobBuilder.buildmonitorcpejob(tenant)
    rundeck_client = ServiceContext().getRdClient()
    session = object_session(tenant)
    if job is None:
        logger.error("monitor cpe job build failed")
        return
    try:
        rundeck_response = rundeck_client.import_job(
            job.to_xml(), fmt="xml", dupeOption="create",
            project="dms-sa", uuidOption="remove")
    except:
        logger.error("import account(%s) job failed, for connection reason"
                     % tenant.id)
        session.flush()
        return
    if rundeck_response['failed'] is None and \
            rundeck_response['skipped'] is None:
        print rundeck_response
        id = rundeck_response['succeeded'][0]["id"]
        name = rundeck_response['succeeded'][0]["name"]
        rdjob = job.node.createrdjob(name, id)
        session.add(rdjob)
        try:
            ret = rundeck_client.run_job(rdjob.jobid)
        except Exception, e:
            logger.error("runjob (%s) error (%s)" % (rdjob.jobid, e.message))
        else:
            status = ret['status']
            if status == 'failed':
                rdjob.jobstate = "runerror"
                href = ret["href"]
                logger.error("job run error, the execution link: (%s)" % href)
            else:
                rdjob.jobstate = "runsuccess"
        session.flush()

def get_tagged_objects(tag, session=None):
    """
    Return all objects tagged with tag.

    :param tag: A string or :class:`Tag`
    :param session:
    """
    close_session = False
    if not isinstance(tag, Tag):
        if not session:
            session = db.Session()
            close_session = True
        tag = session.query(Tag).filter_by(tag=utils.utf8(tag)).first()
    elif not session:
        from sqlalchemy.orm.session import object_session
        session = object_session(tag)
    # filter out any None values from the query which can happen if
    # you tag something and then delete it from the database
    # TODO: the missing tagged objects should probably be removed from
    # the database
    r = [session.query(mapper).filter_by(id=obj_id).first()
         for mapper, obj_id in _get_tagged_object_pairs(tag)]
    r = filter(lambda x: x is not None, r)
    if close_session:
        session.close()
    return r

def onEnter(self, dbcluster):
    dbdecommissioned = HostLifecycle.get_unique(object_session(dbcluster),
                                                "decommissioned",
                                                compel=True)

    config = Config()
    archetype = dbcluster.personality.archetype
    section = "archetype_" + archetype.name
    opt = "allow_cascaded_deco"

    if dbcluster.hosts and (not config.has_option(section, opt) or
                            not config.getboolean(section, opt)):
        raise ArgumentError("Cannot change state to {0}, as {1}'s "
                            "archetype is {2}."
                            .format(dbdecommissioned.name, dbcluster,
                                    archetype.name))

    if dbcluster.virtual_machines:
        raise ArgumentError("Cannot change state to {0}, as {1} has "
                            "{2} VM(s)."
                            .format(dbdecommissioned.name, dbcluster,
                                    len(dbcluster.virtual_machines)))

    for dbhost in dbcluster.hosts:
        dbhost.status.transition(dbhost, dbdecommissioned)

def vmcreate(e):
    tenant = e.tenant
    msg = e.payload
    session = object_session(tenant)
    svc = tenant.getservicebyname(msg.vmType)
    if svc is None:
        logger.error(
            "for node(%s/%s/%s) can not find corresponding service db object"
            % (tenant.id, msg.vmType, msg.stackId))
        return
    logger.info("part sync start.accountId<%s>" % tenant.id)
    ctx = ServiceContext()
    zk_host = ctx.getConfigService().get("Inventory", "zk_address")
    account_sync(tenant.id, zk_host)
    logger.info("part sync finished.accountId<%s>" % tenant.id)
    node = svc.createnode()
    node.stackid = msg.stackId
    node.vmtype = msg.vmType
    node.manageip = msg.vmManagementIP
    node.publicip = msg.vmPublicIP
    node.serviceip = msg.vmServiceIP
    session.add(node)
    session.commit()
    logger.info("node(%s/%s/%s) has been created in db"
                % (tenant.id, node.vmtype, node.stackid))
    flag = True
    for svc in tenant.services:
        flag = flag and svc.isready()
    tenant.state = e.fsm.current
    if flag:
        tenant.getSM().trigger("create_vm_done", tenant=tenant)

def _record(mapper, target, operation):
    s = object_session(target)
    if isinstance(s, SignallingSession):
        pk = tuple(mapper.primary_key_from_instance(target))
        s._model_changes[pk] = (target, operation)
    else:
        print 'everything is so f*****g wrong'

def get_split_size(self, body_data, query_string_data):
    color = body_data['color']
    group = body_data['group']
    response_links = body_data['response_links']

    colors = group.get('color_lineage', []) + [color]
    begins = group.get('begin_lineage', []) + [group['begin']]

    s = object_session(self)
    try:
        source = s.query(input_source.InputSource
                         ).filter_by(destination_task=self,
                                     destination_property=self.parallel_by
                                     ).one()
        size = source.get_size(colors, begins)
    except Exception as e:
        LOG.exception('Failed to get split size')
        self.http.delay('PUT', response_links['failure'])

        execution = s.query(TaskExecution).filter(
            TaskExecution.task == self,
            TaskExecution.color == color).one()
        execution.data['error'] = \
            'Failed to get split size: %s' % e.message
        s.commit()
        return

    LOG.debug('Split size for %s[%s] colors=%s is %s',
              self.name, self.parallel_by, colors, size)
    self.http.delay('PUT', response_links['send_data'],
                    color_group_size=size)

def update(self, obj):
    super().update(obj)
    if type(obj) == dict and 'related_to' in obj:
        related_marker = Marker.search(
            obj['related_to'],
            session=object_session(self) or self.__dbh_session)
        self.related_to = related_marker

def remove_callback(families):
    """
    The callback function to remove a family from the family context menu.
    """
    family = families[0]
    from bauble.plugins.plants.genus import Genus
    session = object_session(family)
    ngen = session.query(Genus).filter_by(family_id=family.id).count()
    safe_str = utils.xml_safe(str(family))
    if ngen > 0:
        msg = _('The family <i>%(family)s</i> has %(num_genera)s genera. '
                'Are you sure you want to remove it?') \
            % dict(family=safe_str, num_genera=ngen)
    else:
        msg = _("Are you sure you want to remove the family <i>%s</i>?") \
            % safe_str
    if not utils.yes_no_dialog(msg):
        return
    try:
        obj = session.query(Family).get(family.id)
        session.delete(obj)
        session.commit()
    except Exception, e:
        msg = _('Could not delete.\n\n%s') % utils.xml_safe(e)
        utils.message_details_dialog(msg, traceback.format_exc(),
                                     type=gtk.MESSAGE_ERROR)

def __call__(self):
    if not mapper.has_identity(self.instance):
        return None

    localparent = mapper.object_mapper(self.instance, raiseerror=False)

    prop = localparent.get_property(self.key)
    strategy = prop._get_strategy(DeferredColumnLoader)

    if self.keys:
        toload = self.keys
    elif strategy.group:
        toload = [p.key for p in localparent.iterate_properties
                  if isinstance(p.strategy, DeferredColumnLoader) and
                  p.group == strategy.group]
    else:
        toload = [self.key]

    # narrow the keys down to just those which have no history
    group = [k for k in toload if k in self.instance._state.unmodified]

    if strategy._should_log_debug:
        strategy.logger.debug(
            "deferred load %s group %s" %
            (mapperutil.attribute_str(self.instance, self.key),
             group and ','.join(group) or 'None'))

    session = sessionlib.object_session(self.instance)
    if session is None:
        raise exceptions.UnboundExecutionError(
            "Parent instance %s is not bound to a Session; deferred "
            "load operation of attribute '%s' cannot proceed" %
            (self.instance.__class__, self.key))

    query = session.query(localparent)
    if not self.optimizing_statement:
        ident = self.instance._instance_key[1]
        query._get(None, ident=ident, only_load_props=group,
                   refresh_instance=self.instance._state)
    else:
        statement, params = self.optimizing_statement(self.instance)
        query.from_statement(statement).params(params)._get(
            None, only_load_props=group,
            refresh_instance=self.instance._state)
    return attributes.ATTR_WAS_SET

def child_context_stats(self, parent_context):
    """
    Given a parent context class, gets all the child context classes,
    and returns histograms of the number of children per parent.
    """
    session = object_session(self)
    parent_name = parent_context.__table__.name

    # Get all the child context relationships
    rels = [r for r in inspect(parent_context).relationships
            if r.back_populates == parent_name]

    # Print the histograms for each child context, and recurse!
    for rel in rels:
        c = rel.mapper.class_
        fk = list(rel._calculated_foreign_keys)[0]

        # Query for average number of child contexts per parent context
        label = 'Number of %ss per %s' % (c.__table__.name, parent_name)
        query = session.query(fk, func.count(c.id).label(label)).group_by(fk)

        # Render as pandas dataframe histogram
        df = pd.read_sql(query.statement, query.session.bind)
        df.hist(label)

        # Recurse to grandchildren
        self.child_context_stats(c)

def get_or_create_execution(self, color, group):
    colors = group.get('color_lineage', []) + [color]
    begins = group.get('begin_lineage', []) + [group['begin']]
    parent_color = _get_parent_color(colors)

    s = object_session(self)
    try:
        return s.query(MethodExecution).filter(
            MethodExecution.method == self,
            MethodExecution.color == color).one()
    except NoResultFound:
        execution = MethodExecution(
            method=self,
            color=color,
            colors=colors,
            begins=begins,
            parent_color=parent_color,
            workflow_id=self.workflow_id,
        )
        s.add(execution)
        try:
            s.commit()
            return execution
        except IntegrityError:
            # another worker inserted the same execution concurrently;
            # roll back and return the winning row
            s.rollback()
            return s.query(MethodExecution).filter(
                MethodExecution.method == self,
                MethodExecution.color == color).one()

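# The commit-then-rollback dance above is a race-safe get-or-create: if
# two workers insert the same (method, color) pair concurrently, the
# unique constraint rejects the loser, which rolls back and re-reads the
# winner's row. The same pattern reduced to its core — get_or_create and
# the **unique_kwargs model are illustrative, not from this module:
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound

def get_or_create(session, model, **unique_kwargs):
    try:
        return session.query(model).filter_by(**unique_kwargs).one()
    except NoResultFound:
        obj = model(**unique_kwargs)
        session.add(obj)
        try:
            session.commit()
            return obj
        except IntegrityError:
            # a concurrent transaction won the insert; fetch its row
            session.rollback()
            return session.query(model).filter_by(**unique_kwargs).one()
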
def onLeave(self, dbcluster):
    dbalmostready = HostLifecycle.get_unique(object_session(dbcluster),
                                             "almostready", compel=True)
    for dbhost in dbcluster.hosts:
        if dbhost.status.name == 'ready':
            dbhost.status.transition(dbhost, dbalmostready)

def get_tag_ids(objs):
    """
    :param objs: a list or tuple of objects

    Return a list of tag ids for tags associated with objs; only tag ids
    that are common to all of the objs are returned.
    """
    # TODO: this function does intersection in the most
    # straightforward way and could probably do with some optimization
    #clause = lambda x: and_(TaggedObj.obj_class==_classname(x),
    #                        TaggedObj.obj_id==x.id)
    #ors = or_(*map(clause, objs))
    if not objs:
        return []
    session = object_session(objs[0])
    # s is None until the first object is processed, so that an object
    # with no tags still empties the intersection
    s = None
    tag_id_query = session.query(Tag.id).join('_objects')
    for obj in objs:
        clause = and_(TaggedObj.obj_class == _classname(obj),
                      TaggedObj.obj_id == obj.id)
        tags = [r[0] for r in tag_id_query.filter(clause)]
        if s is None:
            s = set(tags)
        else:
            s.intersection_update(tags)
    return list(s)

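# A sketch of the optimization hinted at in the TODO above: the
# per-object loop can collapse into one GROUP BY/HAVING query that keeps
# only tags matched by every object. This assumes TaggedObj rows are
# unique per (tag_id, obj_class, obj_id) and that TaggedObj carries a
# tag_id foreign key; both are assumptions about the schema.
from sqlalchemy import and_, func, or_

def get_tag_ids_one_query(session, objs):
    clauses = [and_(TaggedObj.obj_class == _classname(o),
                    TaggedObj.obj_id == o.id) for o in objs]
    query = (session.query(TaggedObj.tag_id)
             .filter(or_(*clauses))
             .group_by(TaggedObj.tag_id)
             .having(func.count() == len(objs)))
    return [row[0] for row in query]
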
def accepted(self):
    'Name that should be used if name of self should be rejected'
    session = object_session(self)
    syn = session.query(FamilySynonym).filter(
        FamilySynonym.synonym_id == self.id).first()
    accepted = syn and syn.family
    return accepted

def create_array_result(self, body_data, query_string_data):
    color = body_data['color']
    group = body_data['group']
    parent_color = group.get('parent_color')
    response_links = body_data['response_links']

    s = object_session(self)
    for output_name in self.output_names:
        source, name, parallel_depths = self.resolve_output_source(
            s, output_name, [])
        results = s.query(result.Result
                          ).filter_by(task=source, name=name,
                                      parent_color=color
                                      ).order_by('result.color'
                                                 ).all()
        array_result = result.Result(task=source, name=name, color=color,
                                     parent_color=parent_color,
                                     data=[r.data for r in results])
        s.add(array_result)

    s.commit()

    LOG.info('Notifying petri: created array result for task (%s) for'
             ' workflow "%s"', self.name, self.workflow.name,
             extra={'workflowName': self.workflow.name})
    self.http.delay('PUT', response_links['created'])

def __init__(self, model, view):
    '''
    :param model: should be an instance of class Family
    :param view: should be an instance of FamilyEditorView
    '''
    super(FamilyEditorPresenter, self).__init__(model, view)
    self.session = object_session(model)

    # initialize widgets
    self.init_enum_combo('fam_qualifier_combo', 'qualifier')
    self.synonyms_presenter = SynonymsPresenter(self)
    self.refresh_view()  # put model values in view

    # connect signals
    self.assign_simple_handler('fam_family_entry', 'family',
                               editor.UnicodeOrNoneValidator())
    self.assign_simple_handler('fam_qualifier_combo', 'qualifier',
                               editor.UnicodeOrEmptyValidator())
    notes_parent = self.view.widgets.notes_parent_box
    notes_parent.foreach(notes_parent.remove)
    self.notes_presenter = \
        editor.NotesPresenter(self, 'notes', notes_parent)

    if self.model not in self.session.new:
        self.view.widgets.fam_ok_and_add_button.set_sensitive(True)

    # for each widget register a signal handler to be notified when the
    # value in the widget changes, that way we can do things like
    # sensitize the ok button
    self.__dirty = False

def input_tasks(self):
    source_ids = set([l.source_id for l in self.input_links])
    if source_ids:
        s = object_session(self)
        return s.query(Task).filter(Task.id.in_(source_ids)).all()
    else:
        return []

def sample_ids(self):
    """faster implementation of getting sample ids"""
    session = object_session(self)
    return [x[0] for x in
            session.query(Sample.id).filter(Sample.batch_id == self.id)]

def populate_obj_inline(self, obj):
    """
    Populate all inline objects. It takes the usual ``obj`` argument
    that is the **parent** of the inline fields. From these all other
    values are derived and finally the objects are updated.

    .. note::
        Right now this assumes the relationship operation is an
        ``append``, thus for example set collections won't work right
        now.
    """
    session = object_session(obj)
    for inline, forms in self.inline_fieldsets.values():
        inline_model = inline.Meta.model
        for index, inline_form in enumerate(forms):
            # Get the primary keys from the form. This ensures that we
            # update existing objects while new objects get inserted.
            pks = inline.pks_from_formdata(self.formdata, index)
            if pks is not None:
                assert not inline_form.is_extra
                inline_obj = session.query(inline.Meta.model).get(pks)
                if inline_obj is None:
                    raise LookupError("Target with pks %s does not exist"
                                      % str(pks))
            else:
                assert inline_form.is_extra
                inline_obj = inline_model()
                relationship_key = self._relationship_key(inline)
                getattr(obj, relationship_key).append(inline_obj)
            # Since the form was created like a standard form and the
            # object was loaded either from the database or newly created
            # and added to its associated object, we can now just populate
            # it as we would do with a normal form and object.
            inline_form.populate_obj(inline_obj)

def format_raw(self, machines, indent=""):
    if machines:
        session = object_session(machines[0])
    commands = []
    for machine in machines:
        # Try to guess the name of the chassis
        result = chassis_re.match(machine.label)
        if result:
            chassis = "%sc%s" % (result.group(1), result.group(2))
            slot = result.group(3)
            dbchassis = Chassis.get_unique(session, chassis)
            if not dbchassis and machine.primary_name:
                fqdn = "%s.%s" % (
                    chassis, machine.primary_name.fqdn.dns_domain.name)
                commands.append("aq add chassis --chassis '%s' "
                                "--rack 'RACK' --model 'MODEL'" % fqdn)
        else:
            chassis = 'CHASSIS'
            slot = 'SLOT'
        commands.append("aq update machine --machine '%s' "
                        "--chassis '%s' --slot '%s'" %
                        (machine.label, chassis, slot))
    return "\n".join(commands)

def create_home_dir(self):
    from contenttypes.container import Directory, Home
    from core.database.postgres.permission import AccessRulesetToRule
    from core.permission import get_or_add_access_rule

    s = object_session(self)
    home_root = s.query(Home).one()
    homedir_name = self.login_name
    home = Directory(homedir_name)
    home_root.container_children.append(home)
    home.children.extend(create_special_user_dirs())

    # add access rules so only the user itself can access the home dir
    private_group = self.get_or_add_private_group()
    # we need the private group ID, it's set on flush by the DB
    s.flush()
    user_access_rule = get_or_add_access_rule(group_ids=[private_group.id])

    for access_type in (u"read", u"write", u"data"):
        ruleset = home.get_or_add_special_access_ruleset(access_type)
        arr = AccessRulesetToRule(rule=user_access_rule)
        ruleset.rule_assocs.append(arr)

    self.home_dir = home
    logg.info("created home dir for user '%s (id: %s)'",
              self.login_name, self.id)
    return home

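# The s.flush() above exists because the private group's primary key is
# assigned by the database and stays None until the pending INSERT is
# flushed. A minimal demonstration of that step with a toy model (all
# names here are illustrative):
from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Group(Base):
    __tablename__ = 'groups'
    id = Column(Integer, primary_key=True)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as s:
    g = Group()
    s.add(g)
    assert g.id is None      # INSERT not yet emitted
    s.flush()
    assert g.id is not None  # the database has assigned the key
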
def create_execution(self, body_data, query_string_data):
    color = body_data['color']
    response_links = body_data['response_links']

    s = object_session(self)
    try:
        execution = s.query(TaskExecution).filter(
            TaskExecution.task == self,
            TaskExecution.color == color).one()
    except NoResultFound:
        group = body_data['group']
        colors = group.get('color_lineage', []) + [color]
        begins = group.get('begin_lineage', []) + [group['begin']]
        parent_color = _get_parent_color(colors)

        execution = TaskExecution(task=self, color=color, colors=colors,
                                  begins=begins, parent_color=parent_color,
                                  data={
                                      'petri_response_links': response_links,
                                  })
        execution.status = statuses.scheduled
        execution.status = statuses.running
        s.add(execution)

    if self.is_canceled:
        execution.status = statuses.canceled
    s.commit()

    self.http.delay('PUT', response_links['created'])

def scale(self, width=None, height=None, crop=False, strip_whitespace=False):
    """Return a scaled version of this image.

    If a matching :py:class:`ImageScale` is found it is returned
    directly. Otherwise the image will be scaled and a new ImageScale
    instance is created.

    See :py:func:`scale_image <s4u.image.scale.scale_image>` for more
    information on the scaling parameters.

    :rtype: :py:class:`ImageScale`
    """
    if not self.path:
        raise TypeError('Can not scale an image that is not stored locally.')
    if not (width or height):
        raise ValueError('You must specify either width or height')
    session = object_session(self)
    scale = session.query(ImageScale)\
        .filter(ImageScale.image_id == self.id)\
        .filter(ImageScale.param_width == (width or 0))\
        .filter(ImageScale.param_height == (height or 0))\
        .filter(ImageScale.param_crop == crop)\
        .filter(ImageScale.param_strip_whitespace == strip_whitespace)\
        .first()
    if scale is None:
        scale = ImageScale(self, width, height, crop, strip_whitespace)
        session.add(scale)
    return scale
