def save(self, id=None):
    """Save edits to an assay, invalidating its cached sequences if any
    amplicon-defining field changed."""
    if id is None:
        abort(404)

    assay_q = Session.query(Assay)
    assay = assay_q.filter_by(id=id).first()
    if assay is None:
        abort(404)

    reload_sequences = False
    for k, v in self.form_result.items():
        if k in ("primer_fwd", "primer_rev", "chromosome", "probe_pos",
                 "amplicon_width", "snp_rsid"):
            if getattr(assay, k) != v:
                reload_sequences = True
        if k != "id":
            setattr(assay, k, v)

    # Blow away previous sequences; on view, this will update.
    if reload_sequences:
        cached_sequences = assay.cached_sequences
        while cached_sequences:
            cs = cached_sequences[-1]
            snps = cs.snps
            while snps:
                snp = snps.pop()
                Session.delete(snp)
            cached_sequences.pop()
            Session.delete(cs)

    self.__update_tms(assay)
    Session.commit()
    session.save()
    redirect(url(controller="assay", action="view", id=assay.id))
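# --- Illustrative sketch (hypothetical helper, not in the original code) ---
# The teardown loop in save() above, expressed as a standalone helper in the
# style of sequence_group_unlink_sequences() below: delete grandchildren
# (SNPs) before children (cached sequences) so no ORM cascade has to guess
# at the ordering. Assumes the same Session and Assay models.
def clear_cached_sequences(assay):
    for cs in list(assay.cached_sequences):
        for snp in list(cs.snps):
            Session.delete(snp)
        cs.snps = []
        Session.delete(cs)
    assay.cached_sequences = []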
def batch_delete(self, id=None):
    batch = self.__load_batch(id)
    if not batch:
        abort(404)

    # check for bound plates
    bound_plates = [p for p in batch.plates if p.plate_id is not None]
    if bound_plates:
        session['flash'] = "Cannot delete this batch; there are run plates bound to it."
        session['flash_class'] = 'error'
        session.save()
        return redirect(url(controller='product', action='batch_edit', id=id))
    else:
        try:
            for plate in batch.plates:
                Session.delete(plate)
            batch.plates = []
            Session.delete(batch)
            Session.commit()
            session['flash'] = "Batch deleted."
            session.save()
        except Exception:
            logging.exception("Error from batch deletion:")
            session['flash'] = "Could not delete the batch from the database."
            session['flash_class'] = 'error'
            session.save()
            return redirect(url(controller='product', action='batch_edit', id=id))
        # redirect() raises an exception, so keep it out of the try block.
        return redirect(url(controller='product', action='batch_list'))
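# --- Illustrative note (assumed Pylons behavior, not in the original code) ---
# In Pylons, redirect() works by raising an HTTP exception. If the success
# redirect above were placed inside the try block, the bare `except Exception`
# would swallow it and report a spurious deletion error:
#
#     try:
#         Session.delete(batch)
#         Session.commit()
#         return redirect(url(...))   # raises; would be caught below
#     except Exception:
#         ...                         # would fire on every successful delete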
def delete_file(self, id=None):
    self.__setup_box2_code_context(id)
    thefile = self.__file_id_query(c.box2.id, self.form_result['file_id'])
    if not thefile:
        abort(404)
    Session.delete(thefile)
    Session.commit()
    session['flash'] = 'File deleted.'
    session.save()
    return redirect(url(controller='metrics', action='certification', id=c.box2.code))
def update_size(self, id=None):
    batch = self.__batch(id)
    if not batch:
        abort(404)
    batch_test = self.__batch_test(id)
    if not batch_test:
        batch_test = ConsumableBatchTest(consumable_batch_id=batch.id)
        Session.add(batch_test)
    batch_test.pixel_calibration = self.form_result["pixel_calibration"]

    # Check for cleared entities first: existing channel rows that no longer
    # appear in the form, or that were submitted entirely blank, get deleted.
    garbage = []
    for chan in batch_test.size_channels:
        thechip = [chip for chip in self.form_result["chips"]
                   if chip["chip_num"] == chan.chip_num]
        if not thechip:
            garbage.append(chan)
            continue
        thechan = [c for c in thechip[0]["channels"]
                   if c["channel_num"] == chan.channel_num]
        if not thechan:
            garbage.append(chan)
            continue
        if thechan[0]["droplet_count"] is None and thechan[0]["mean"] is None \
           and thechan[0]["stdev"] is None:
            garbage.append(chan)
    for g in garbage:
        batch_test.size_channels.remove(g)
        Session.delete(g)

    # Upsert the remaining rows. The nested chips/channels dict is the sort of
    # shape a GAE-like Entity, a Mongo document, or JSON in a text column holds.
    for chip in self.form_result["chips"]:
        for channel in chip["channels"]:
            if channel["droplet_count"] is not None or channel["mean"] is not None \
               or channel["stdev"] is not None:
                dbchan = batch_test.size_channel(chip["chip_num"], channel["channel_num"])
                if not dbchan:
                    dbchan = ConsumableBatchSizeChannel(
                        chip_num=chip["chip_num"],
                        channel_num=channel["channel_num"]
                    )
                    batch_test.size_channels.append(dbchan)
                dbchan.size_mean = channel["mean"]
                dbchan.size_stdev = channel["stdev"]
                dbchan.droplet_count = channel["droplet_count"]

    Session.commit()
    session["flash"] = "Sizes updated."
    session.save()
    return redirect(url(controller="consumable", action="size", id=batch.id))
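# --- Illustrative sketch (hypothetical helper, not in the original code) ---
# update_size() above and update_fill() below share a "sync" shape: delete DB
# rows the form no longer mentions, then upsert the rest. A generic version
# might look like this; `rows`, `is_blank`, and `make_row` are assumptions,
# and the caller assigns the per-row fields from each yielded pair.
def sync_channel_rows(rows, form_chips, is_blank, make_row):
    # Collect the (chip, channel) keys the form actually wants kept.
    wanted = {}
    for chip in form_chips:
        for channel in chip["channels"]:
            if not is_blank(channel):
                wanted[(chip["chip_num"], channel["channel_num"])] = channel
    # Drop rows missing from the form or submitted entirely blank.
    for row in list(rows):
        if (row.chip_num, row.channel_num) not in wanted:
            rows.remove(row)
            Session.delete(row)
    # Upsert what remains, yielding (db_row, form_channel) for the caller.
    existing = dict(((row.chip_num, row.channel_num), row) for row in rows)
    for (chip_num, channel_num), channel in wanted.items():
        row = existing.get((chip_num, channel_num))
        if not row:
            row = make_row(chip_num, channel_num)
            rows.append(row)
        yield row, channel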
def enzyme_conc_delete(self, id=None):
    if id is None:
        abort(404)
    conc = Session.query(EnzymeConcentration).get(id)
    if not conc:
        abort(404)
    assay_id = conc.assay_id
    Session.delete(conc)
    Session.commit()
    redirect(url(controller="assay", action="view", id=assay_id))
def delete(self, id=None):
    """Delete the assay with the given id, then return to the assay list."""
    if id is None:
        abort(404)
    assay_q = Session.query(Assay)
    assay = assay_q.filter_by(id=id).first()
    if assay:
        Session.delete(assay)
        Session.commit()
        session["flash"] = "Assay deleted."
        session.save()
    redirect(url(controller="assay", action="list"))
def tearDown(self):
    from qtools.model import Session
    Session.delete(self.testUser)
    Session.delete(self.testProject)
    Session.delete(self.readQLP)
    Session.delete(self.unreadQLP)
    Session.commit()
def untag(self):
    well = Session.query(QLBWell).get(self.form_result['well_id'])
    if not well:
        abort(500)
    tag_ids = [tag.id for tag in well.tags]
    new_id = self.form_result['tag_id']
    if new_id in tag_ids:
        well_tag = [tag for tag in well.well_tags if tag.well_tag_id == new_id][0]
        Session.delete(well_tag)
        Session.commit()
    if self.form_result['tagger_id']:
        session['person_id'] = self.form_result['tagger_id']
        session.save()
    return {'tag_id': new_id, 'tag_names': [tag.name for tag in well.tags]}
def delete(self, id=None):
    setup = self.__load_groove_setup(id)
    if not setup:
        abort(404)
    if setup.plates:
        session["flash"] = "Cannot delete this setup; QTools has processed data for this plate."
        session["flash_class"] = "error"
        session.save()
        return redirect(url(controller="groove", action="edit", id=id))
    else:
        Session.delete(setup)
        Session.commit()
        session["flash"] = "Plate deleted."
        session.save()
        return redirect(url(controller="groove", action="list"))
def delete(self, id=None):
    if id is None:
        abort(404)
    tag = Session.query(SequenceGroupTag).get(int(id))
    if not tag:
        abort(404)
    try:
        tag.sequence_groups = []
        Session.delete(tag)
        Session.commit()
    except Exception:
        Session.rollback()
        session['flash'] = 'Could not delete category.'
        session['flash_class'] = 'error'
        session.save()
    return redirect(url(controller='assay_group', action='edit', id=id))
def update_fill(self, id=None):
    batch = self.__batch(id)
    if not batch:
        abort(404)

    # Check for cleared entities: existing channel rows absent from the form,
    # or with no fill time, get deleted. Same sync shape as update_size().
    garbage = []
    for chan in batch.fill_channels:
        thechip = [chip for chip in self.form_result["chips"]
                   if chip["chip_num"] == chan.chip_num]
        if not thechip:
            garbage.append(chan)
            continue
        thechan = [c for c in thechip[0]["channels"]
                   if c["channel_num"] == chan.channel_num]
        if not thechan:
            garbage.append(chan)
            continue
        if thechan[0]["fill_time"] is None:
            garbage.append(chan)
    for g in garbage:
        batch.fill_channels.remove(g)
        Session.delete(g)

    for chip in self.form_result["chips"]:
        for channel in chip["channels"]:
            if channel["fill_time"] is not None:
                dbchan = batch.fill_channel(chip["chip_num"], channel["channel_num"])
                if not dbchan:
                    dbchan = ConsumableBatchFillChannel(
                        chip_num=chip["chip_num"],
                        channel_num=channel["channel_num"]
                    )
                    batch.fill_channels.append(dbchan)
                dbchan.fill_time = channel["fill_time"]

    Session.commit()
    session["flash"] = "Fill times updated."
    session.save()
    return redirect(url(controller="consumable", action="fill", id=batch.id))
def batch_size_upload(self, id=None):
    batch = self.__batch(id)
    if not batch:
        abort(404)
    batch_test = self.__batch_test(id)
    if not batch_test:
        batch_test = ConsumableBatchTest(consumable_batch_id=batch.id)
        Session.add(batch_test)
    batch_test.pixel_calibration = self.form_result["pixel_calibration"]

    # Replace any existing size channels wholesale.
    while batch_test.size_channels:
        sc = batch_test.size_channels.pop()
        Session.delete(sc)

    # Place files in order: eight channels per chip, so the sorted file index
    # maps to chip (idx // 8) + 1 and channel (idx % 8) + 1.
    chip_num = 0
    pc = batch_test.pixel_calibration
    for idx, channel in enumerate(sorted(self.form_result["sizes"],
                                         key=operator.itemgetter("file_num"))):
        if idx % 8 == 0:
            chip_num = chip_num + 1
        dbchan = ConsumableBatchSizeChannel(
            chip_num=chip_num,
            channel_num=(idx % 8) + 1,
            size_mean=channel["mean"] * pc,
            size_stdev=channel["stdev"] * pc,
            droplet_count=channel["droplet_count"],
        )
        batch_test.size_channels.append(dbchan)

    Session.commit()
    session["flash"] = "Sizes updated."
    session.save()
    return redirect(url(controller="consumable", action="size", id=batch.id))
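# --- Illustrative sketch (hypothetical helper, not in the original code) ---
# The index arithmetic above, pulled out with a few worked examples: sorted
# position 0 maps to chip 1, channel 1; position 7 to chip 1, channel 8;
# position 8 rolls over to chip 2, channel 1.
def chip_channel_for_index(idx, channels_per_chip=8):
    return (idx // channels_per_chip) + 1, (idx % channels_per_chip) + 1

assert chip_channel_for_index(0) == (1, 1)
assert chip_channel_for_index(7) == (1, 8)
assert chip_channel_for_index(8) == (2, 1)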
def sequence_group_unlink_sequences(sequence_group):
    """
    If a sequence group changes, destroy all the original sequences,
    all amplicons, and associated SNPs.

    Does not commit.
    """
    for a in sequence_group.amplicons:
        for cs in a.cached_sequences:
            for snp in cs.snps:
                Session.delete(snp)
            cs.snps = []
            Session.delete(cs)
        a.cached_sequences = []
        Session.delete(a)
    sequence_group.amplicons = []

    for t in sequence_group.transcripts:
        for snp in t.snps:
            Session.delete(snp)
        t.snps = []
        Session.delete(t)
    sequence_group.transcripts = []

    for fp in sequence_group.forward_primers:
        if fp.sequence:
            Session.delete(fp.sequence)
        Session.delete(fp)
    sequence_group.forward_primers = []

    for rp in sequence_group.reverse_primers:
        if rp.sequence:
            Session.delete(rp.sequence)
        Session.delete(rp)
    sequence_group.reverse_primers = []

    for p in sequence_group.probes:
        if p.sequence:
            Session.delete(p.sequence)
        Session.delete(p)
    sequence_group.probes = []
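# --- Illustrative usage sketch (assumed call pattern, not in the original) ---
# Because sequence_group_unlink_sequences() deliberately does not commit, a
# caller is expected to update the group and flush the deletions itself:
#
#     sequence_group_unlink_sequences(group)
#     group.chromosome = new_chromosome   # hypothetical field update
#     Session.commit()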
def delete_plate_recursive(plate_id):
    """
    Given a plate id, delete:

    -- The plate and its wells (recursively).
    -- Any plate metrics associated with the plate.
    -- Any connection to analysis groups or reprocess configs.

    Keep in mind that if the QLP file backing this plate is present in the
    source filesystem, it will be reanalyzed.
    """
    # Well, crap. The joinedload is necessary to populate the
    # qlbwell/qlbwell_channel tree, so that deleting the underlying metrics
    # doesn't orphan the well or channel. The cascade on PlateMetric is set
    # to delete-orphan, which may not be the right thing to do.
    #
    # The workaround/fix would be to make all plate metric deletion explicit,
    # and remove the delete-orphan cascade from plate metric, as children
    # include refs to qlbwell and qlbwell_channel.
    plate = Session.query(Plate).filter_by(id=plate_id)\
                   .options(joinedload_all(Plate.qlbplate, QLBPlate.wells, QLBWell.channels),
                            joinedload_all(Plate.metrics, PlateMetric.well_metrics,
                                           WellMetric.well_channel_metrics)).first()
    if not plate:
        return

    # secondary tables first
    for ag in plate.analysis_groups:
        Session.delete(ag)
    for tag in plate.tags:
        Session.delete(tag)
    for lot_number in plate.lot_numbers:
        Session.delete(lot_number)

    cnv_evidence = Session.query(AssaySampleCNV).filter_by(source_plate_id=plate_id).all()
    for cev in cnv_evidence:
        Session.delete(cev)

    enzyme_evidence = Session.query(EnzymeConcentration).filter_by(source_plate_id=plate_id).all()
    for eev in enzyme_evidence:
        Session.delete(eev)

    for pm in plate.metrics:
        for wm in pm.well_metrics:
            for wcm in wm.well_channel_metrics:
                Session.delete(wcm)
            wm.well_channel_metrics = []
            Session.delete(wm)
        pm.well_metrics = []
        Session.delete(pm)
    plate.metrics = []

    from qtools.model.batchplate import ManufacturingPlate
    related_batches = Session.query(ManufacturingPlate).filter_by(plate_id=plate.id).all()
    for rb in related_batches:
        rb.plate_id = None

    for w in plate.qlbplate.wells:
        for c in w.channels:
            Session.delete(c)
        w.channels = []
        if w.file_id != -1:
            Session.delete(w.file)
        Session.delete(w)
    plate.qlbplate.wells = []

    if plate.qlbplate.file_id != -1:
        Session.delete(plate.qlbplate.file)
    Session.delete(plate.qlbplate)
    Session.delete(plate)
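# --- Illustrative usage sketch (assumed call pattern, not in the original) ---
# delete_plate_recursive() issues deletes on the session but never commits,
# so a caller would typically own the transaction boundary; the wrapper name
# here is hypothetical.
def delete_plate_and_commit(plate_id):
    try:
        delete_plate_recursive(plate_id)
        Session.commit()
    except Exception:
        Session.rollback()
        raise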
def write_images_stats_for_plate(dbplate, qlplate, image_source, overwrite=False,
                                 override_plate_type=None):
    """
    Write plate metrics to the database and thumbnails to local storage,
    as dictated by image_source.

    Metrics will be related to the supplied dbplate (Plate model).
    qlplate is a QLPlate object derived from reading the QLP file.
    """
    if image_source.subdir_exists(str(dbplate.id)):
        if not overwrite:
            return
    else:
        image_source.make_subdir(str(dbplate.id))

    max_amplitudes = (24000, 12000)
    show_only_gated = False  # keep default behavior

    if qlplate:
        for well_name, qlwell in sorted(qlplate.analyzed_wells.items()):
            # TODO: common lib?
            if well_channel_automatic_classification(qlwell, 0):
                fig = plot_fam_peaks(qlwell.peaks,
                                     threshold=qlwell.channels[0].statistics.threshold,
                                     max_amplitude=max_amplitudes[0])
            else:
                fig = plot_fam_peaks(qlwell.peaks,
                                     threshold=qlwell.channels[0].statistics.threshold,
                                     threshold_color='red',
                                     max_amplitude=max_amplitudes[0],
                                     background_rgb=MANUAL_THRESHOLD_FAM_BGCOLOR)
            fig.savefig(image_source.get_path('%s/%s_%s.png' % (dbplate.id, well_name, 0)),
                        format='png', dpi=72)
            plt_cleanup(fig)

            if well_channel_automatic_classification(qlwell, 1):
                fig = plot_vic_peaks(qlwell.peaks,
                                     threshold=qlwell.channels[1].statistics.threshold,
                                     max_amplitude=max_amplitudes[1])
            else:
                fig = plot_vic_peaks(qlwell.peaks,
                                     threshold=qlwell.channels[1].statistics.threshold,
                                     threshold_color='red',
                                     max_amplitude=max_amplitudes[1],
                                     background_rgb=MANUAL_THRESHOLD_VIC_BGCOLOR)
            fig.savefig(image_source.get_path('%s/%s_%s.png' % (dbplate.id, well_name, 1)),
                        format='png', dpi=72)
            plt_cleanup(fig)

            if qlwell.clusters_defined:
                threshold_fallback = qlwell.clustering_method == QLWell.CLUSTERING_TYPE_THRESHOLD
                fig = plot_cluster_2d(qlwell.peaks, width=60, height=60,
                                      thresholds=[qlwell.channels[0].statistics.threshold,
                                                  qlwell.channels[1].statistics.threshold],
                                      boundaries=[0, 0, 12000, 24000],
                                      show_axes=False, antialiased=True,
                                      unclassified_alpha=0.5,
                                      use_manual_clusters=not well_channel_automatic_classification(qlwell),
                                      highlight_thresholds=threshold_fallback)
                fig.savefig(image_source.get_path('%s/%s_2d.png' % (dbplate.id, well_name)),
                            format='png', dpi=72)
                plt_cleanup(fig)

    # Replace any metrics that were computed without a reprocess config.
    pms = [pm for pm in dbplate.plate.metrics if pm.reprocess_config_id is None]
    for pm in pms:
        Session.delete(pm)

    plate = dbplate_tree(dbplate.plate.id)

    # Override plate_type if supplied (another artifact of bad abstraction).
    if override_plate_type:
        plate.plate_type = override_plate_type

    # This relies on apply_template/apply_setup working correctly on plate
    # addition; verify on a DR 10005 plate that this works.
    if plate.plate_type and plate.plate_type.code in beta_plate_types:
        plate_metrics = get_beta_plate_metrics(plate, qlplate)
    else:
        plate_metrics = process_plate(plate, qlplate)
    Session.add(plate_metrics)
def tearDown(self):
    Session.rollback()  # catches pending changes
    Session.delete(self.plateTemplate)
    Session.delete(self.shortTemplate)
    Session.delete(self.qlbPlate)
    Session.delete(self.plateVariant)
    Session.delete(self.unknownPlate)
    Session.delete(self.nameVariant)
    Session.delete(self.unknownName)
    Session.commit()
    super(TestNamePlate, self).tearDown()