def remove_type_enrichment():
    """Delete OLAC type enrichment metadata (Extension_ID = 15) and reset the
    type-classification flags on ARCHIVED_ITEM.

    Reads one CLI argument: an Archive_ID, or the literal string "all" to act
    on every archive.  Exits with a usage message when the argument is missing.
    """
    try:
        archive = sys.argv.pop(1)
    except IndexError:
        sys.stderr.write("Usage: [Archive ID]|all\n")
        sys.exit()
    con = database.connect()
    cur = con.cursor()
    if archive == "all":
        cur.execute(
            "delete from METADATA_ELEM using ARCHIVED_ITEM "
            "inner join METADATA_ELEM on METADATA_ELEM.Item_ID = ARCHIVED_ITEM.Item_ID "
            "where Code is not NULL and Extension_ID = 15")
        cur.execute(
            "update ARCHIVED_ITEM set TypeClassifiedDate = NULL,HasOLACType = 0")
    else:
        # FIX: the archive id came straight from argv and was %-interpolated
        # into the SQL string (injection risk).  Use DB-API parameter binding
        # so the driver quotes it.
        cur.execute(
            "delete from METADATA_ELEM using ARCHIVED_ITEM "
            "inner join METADATA_ELEM on METADATA_ELEM.Item_ID = ARCHIVED_ITEM.Item_ID "
            "where Code is not NULL and Extension_ID = 15 and Archive_ID = %s",
            (archive,))
        cur.execute(
            "update ARCHIVED_ITEM set TypeClassifiedDate = NULL,HasOLACType = 0 "
            "where Archive_ID = %s",
            (archive,))
    # make sure the actions stick!
    con.commit()
def _rc_remove_triggerred(self):
    """Removes the selected item by clicking on the remove option in context
    menu.

    Deletes the document's database row, removes its on-disk directory under
    DOCS_PATH, rebuilds the source model from the database, and asks the main
    window to refresh its collection table.
    """
    # NOTE(review): accessed without parentheses — presumably these are
    # properties on this class; confirm they are not unbound methods.
    index = self._get_current_index
    model = self.model().sourceModel()
    docid = model.items[index.row()]
    coll_name = self._get_current_coll_name
    doc_directory = os.path.join(DOCS_PATH, docid)
    # Four levels up the widget hierarchy to reach the main window —
    # fragile if the layout nesting ever changes.
    mainWindows = self.parent().parent().parent().parent()
    # removing the selected item
    conn, c = database.connect()
    database.delete_nth_row(conn, c, coll_name, docid)
    # remove the directory
    shutil.rmtree(doc_directory)
    # refreshing the model
    model.clear_items()
    collection = database.fetch_collection(c, coll_name)
    model.add_recording(collection)
    # refresh the TableWidget in the main windows
    mainWindows.update_coll_list(coll_name)
    conn.close()
def main(): try: inputfile = sys.argv.pop(1) except IndexError: sys.stderr.write("You must specify an input file\n") sys.exit() con = database.connect() cur = con.cursor() ctr = 0 enriched = 0 for line in open(inputfile): ctr += 1 id, junk, results = line.strip().split('\t') type, probability = results.split()[0].split(':') percent = "%.3f" % (100*float(probability)) #itemid = getItemID(cur, id) itemid = id if itemid: setClassifiedDate(cur, itemid) if type != 'NONE' and type != 'LNONE' and percent > 1: enriched += 1 enrich_type(cur, itemid, type, percent) print "enriched %s with %s type probability %s" % (id, type, percent) else: print "Error: Cannot find item for %s" % id con.commit() print "%s lines processed" % ctr print "%s items enriched" % enriched print "%s items skipped because of NONE or LNONE" % (ctr - enriched)
def update_collection_widget(self):
    """Rebuild the collections list view from the database.

    Ensures the main collection contains all docs, then pushes the current
    collection names into the list widget.
    """
    conn, c = database.connect(add_main=True)
    database._add_docs_to_maincoll(conn, c)
    names = [row[0] for row in database.get_collections(c)]
    self.listView_collections.update_list(names)
    conn.close()
def _set_collections(self):
    """Populate the left dock's collection list from the database.

    Ensures the main collection contains all docs before reading the
    collection names.
    """
    conn, c = database.connect(add_main=True)
    database._add_docs_to_maincoll(conn, c)
    names = [row[0] for row in database.get_collections(c)]
    self.dwc_left.listView_collections.add_collections(names)
    conn.close()
def _item_clicked(self, item):
    """Emit the MBID of the clicked row via the ``item_changed`` signal.

    Looks up the collection name from the first QLabel child of the parent
    widget, then fetches the row at the current index from the database.
    """
    current_index = self.currentIndex().row()
    conn, c = database.connect()
    try:
        coll_label = self.parent().findChildren(QLabel)[0]
        coll_name = coll_label.text()
        mbid = database.get_nth_row(c, coll_name, current_index)[0]
    finally:
        # FIX: the connection was opened but never closed (leaked on every
        # click); sibling handlers in this file close theirs.
        conn.close()
    self.item_changed.emit(mbid)
def add_received_doc(self, coll, index):
    """Add the document selected in the query results table to *coll*.

    On success the downloaded-items table gains a row, its docid→row index map
    is updated, and post-add processing is triggered via ``check_new_doc``.
    """
    conn, c = database.connect()
    source_index = self.frame_query.tableView_results.model().mapToSource(index)
    docid = self.recordings[source_index.row()]
    if database.add_doc_to_coll(conn, c, docid, coll):
        self.dwc_left.tableView_downloaded.add_item(docid)
        # `docid` is the same value the original re-derived from recordings.
        self.dwc_left.tableView_downloaded.indexes[docid] = \
            self.dwc_left.tableView_downloaded.rowCount() - 1
        self.check_new_doc([docid])
    # FIX: the connection was opened but never closed (leaked on every call).
    conn.close()
def open_player_collection(self, index):
    """Open a player window for the recording at *index* in the currently
    selected collection."""
    coll = str(self.dwc_left.listView_collections.currentItem().text())
    conn, c = database.connect()
    # TODO: wrap the row lookup in try/except (noted in the original code)
    row = database.get_nth_row(c, coll, index.row())
    docid = row[0]
    conn.close()
    player = PlayerMainWindow(docid=str(docid), parent=self)
    player.show()
def main():
    """Import subject-classified records from the input file named on the
    command line, writing through the given database connection."""
    try:
        inputfile = sys.argv.pop(1)
    except IndexError:
        sys.stderr.write("You must specify an input file\n")
        sys.exit()
    con = database.connect()
    # FIX: removed the unused local cursor (`cur = con.cursor()`); the
    # importer receives the connection and presumably creates its own cursor.
    import_subject_classified(con, inputfile)
def _open_coll_table(self):
    """Open a dialog listing the recordings of the selected collection.

    Does nothing when no collection is selected in the list view.
    """
    current_coll = self.listView_collections.currentItem()
    if not current_coll:
        return
    self.coll_dialog = DialogCollTable(self)
    self.coll_dialog.label_collection.setText(current_coll.text())
    conn, c = database.connect()
    recordings = database.fetch_collection(c, current_coll.text())
    self.coll_dialog.model.add_recording(recordings)
    conn.close()
    self.coll_dialog.show()
def main():
    """Remove subject enrichments for the archive named on the command line
    (an Archive_ID or the literal "all")."""
    try:
        # FIX: the argument used to be read into `archive` while the stale
        # placeholder `archive_id = 0` was what got passed on, so the CLI
        # argument was silently ignored and archive 0 was always targeted.
        archive_id = sys.argv.pop(1)
    except IndexError:
        sys.stderr.write("Usage: [Archive ID]|all")
        sys.exit()
    con = database.connect()
    cur = con.cursor()
    remove_subject_enrichments(archive_id)
def dropEvent(self, event):
    """Accept rows dragged from another table and add the corresponding
    documents to this table's collection.

    For each selected source row whose document can be added to
    ``self.coll``, a row is appended here, the docid→row map is updated,
    and the docids are emitted via ``added_new_doc``.
    """
    # The QTableWidget from which selected rows will be moved
    sender = event.source()
    # Default dropEvent method fires dropMimeData with appropriate
    # parameters (we're interested in the row index).
    super(QTableWidget, self).dropEvent(event)
    # Now we know where to insert selected row(s)
    drop_row = self.last_drop_row
    selected_rows = sender.get_selected_rows()
    selected_rows_index = [item.row() for item in selected_rows]
    # if sender == receiver (self), after creating new empty rows selected
    # rows might change their locations
    sel_rows_offsets = [
        0 if self != sender or srow < drop_row else len(selected_rows_index)
        for srow in selected_rows_index
    ]
    selected_rows_index = [
        row + offset
        for row, offset in zip(selected_rows_index, sel_rows_offsets)
    ]
    # copy content of selected rows into empty ones
    docs = []
    # NOTE(review): `conn` is never closed in this handler — confirm whether
    # the connection is reclaimed elsewhere.
    conn, c = database.connect()
    for i, srow in enumerate(selected_rows):
        source_index = sender.model().mapToSource(srow)
        if database.add_doc_to_coll(conn, c,
                                    self.recordings[source_index.row()],
                                    self.coll):
            # index in the source model
            index = sender.model().mapToSource(selected_rows[i])
            # item in the source model (column 1 holds the display text)
            item = sender.model().sourceModel().item(index.row(), 1)
            if item:
                self.add_item(item.text())
            docs.append(self.recordings[source_index.row()])
            self.indexes[self.recordings[source_index.row()]] = \
                self.rowCount() - 1
            # sender.model().sourceModel().set_checked(
            #     [selected_rows_index[i]])
    if docs:
        self.added_new_doc.emit(docs)
    event.accept()
def clicked_ok(self):
    """Create a new collection from the name typed in the dialog.

    On success the dialog closes, ``new_collection_added`` is emitted, and the
    parent's collection widget is refreshed; otherwise a warning box is shown.
    """
    conn, c = database.connect()
    user_input = str(self.coll_edit.text())
    status = False
    if user_input:
        status = database.add_collection(conn, c, user_input)
    # FIX: the connection was never closed (leaked on both branches).
    conn.close()
    if status:
        self.close()
        self.new_collection_added.emit()
        self.parent().update_collection_widget()
    else:
        msg_box = QMessageBox()
        msg_box.setText('Given collection name is not valid!')
        msg_box.setWindowTitle('')
        msg_box.exec_()
def _add_actions(self):
    """Build the context menu: an 'Open on Player' action, an
    'Add to collection' submenu (one entry per collection in the database),
    and an overall-histograms action."""
    self.open_dunya = QAction("Open on Player", self)
    self.open_dunya.setIcon(QIcon(DUNYA_ICON))
    self.addAction(self.open_dunya)
    self.addSeparator()

    collections_menu = self.addMenu('Add to collection')
    conn, c = database.connect()
    for row in database.get_collections(c):
        collections_menu.addAction(CollectionAction(str(row[0]), self))
    conn.close()

    self.addSeparator()
    self.overall_hist_action = QAction("Compute overall histograms", self)
    self.addAction(self.overall_hist_action)
def _compute_overall_histograms(self):
    """ Computes the overall histograms of selected items in a table.

    For each selected row, loads the precomputed pitch distribution and tonic
    JSON files from the document's directory under DOCS_PATH and hands the
    collected histograms to ``corpusbasestatistics``.
    """
    coll_widget = self.parent().parent().listView_collections
    coll = str(coll_widget.currentItem().text())
    conn, c = database.connect()
    histograms = {}
    for row in self.selected_indexes:
        mbid = str(database.get_nth_row(c, coll, row)[0])
        pd_path = os.path.join(DOCS_PATH, mbid,
                               'audioanalysis--pitch_distribution.json')
        tnc_path = os.path.join(DOCS_PATH, mbid,
                                'audioanalysis--tonic.json')
        vals, bins = load_pd(pd_path)
        tonic = load_tonic(tnc_path)
        histograms[mbid] = [[vals, bins], tonic]
    # FIX: the connection was never closed (leaked on every invocation).
    conn.close()
    corpusbasestatistics.compute_overall_histogram(histograms)
def reset_for_type_enrichment():
    """Reset the type-classification flags on ARCHIVED_ITEM so items get
    re-classified.

    Reads one CLI argument: an Archive_ID, or the literal string "all" to
    reset every archive.
    """
    try:
        archive = sys.argv.pop(1)
    except IndexError:
        sys.stderr.write("Usage: [Archive ID]|all")
        sys.exit()
    con = database.connect()
    cur = con.cursor()
    if archive == "all":
        cur.execute(
            "update ARCHIVED_ITEM set TypeClassifiedDate = NULL,HasOLACType = 0;")
    else:
        # FIX: the archive id was %-interpolated straight into the SQL string
        # (injection risk); bind it as a query parameter instead.
        cur.execute(
            "update ARCHIVED_ITEM set TypeClassifiedDate = NULL,HasOLACType = 0 "
            "where Archive_ID = %s;",
            (archive,))
    # FIX: commit was missing, so the UPDATE was lost when the connection
    # closed (the sibling remove_type_enrichment commits explicitly).
    con.commit()
def main():
    """Read binary language-resource classifier output and write enrichments.

    Input lines are tab-separated ``id, junk, results`` where ``results``
    holds three ``KEY:VALUE`` tokens.  Items answered YES with probability
    above 1% are enriched in the database and appended to the merge output
    file; the header row (id == 'Item_ID') is skipped.
    """
    try:
        inputfile = sys.argv.pop(1)
        outputfile = sys.argv.pop(1)
    except IndexError:
        sys.stderr.write("You must specify input and output files\n")
        sys.exit()
    OUT = open(outputfile, 'w')
    con = database.connect()
    cur = con.cursor()
    ctr = 0
    enriched = 0
    # keep track of database commits
    updates = 0
    for line in open(inputfile):
        id, junk, results = line.strip().split('\t')
        # skip the header row
        if id == 'Item_ID':
            continue
        ctr += 1
        tokens = results.split()
        key1, value1 = tokens[0].split(':')
        key2, value2 = tokens[1].split(':')
        key3, value3 = tokens[2].split(':')
        # find the probability that it is a language resource
        # NOTE(review): `answer` is always the first key, even when a later
        # token is the YES one — confirm that is intended.
        answer = key1
        if(key1 == 'YES'):
            probability = "%.3f" % (100*float(value1))
        elif(key2 == 'YES'):
            probability = "%.3f" % (100*float(value2))
        elif(key3 == 'YES'):
            probability = "%.3f" % (100*float(value3))
        else:
            probability = "0.000"
            print "P(x) not found for ID "+ id
        #itemid = getItemID(cur,id)
        itemid = id
        #print "answer:", answer, "P(x):", probability
        #setClassifiedDate(cur, id)
        if answer != 'NONE' and answer != 'LNONE':
            # items with probability <= 1% fall through silently: counted in
            # ctr but neither enriched nor reported
            if float(probability) > 1:
                enriched += 1
                enrich_binary(cur, id, probability)
                # create merge file
                print >>OUT, itemid, '\t', probability
                # update DB every 1000 records
                updates += 1
                if updates > 1000:
                    con.commit()
                    updates = 0
                print "enriched %s with %s answer probability %s" % (id, answer, probability)
        else:
            print "failed: ", results
    con.commit()
    print "%s lines processed" % ctr
    print "%s items enriched" % enriched
    print "%s items skipped because of NONE or LNONE" % (ctr - enriched)
def update_coll_list(self, coll):
    """Refresh the downloaded-items table to show collection *coll*.

    Fetches the collection rows from the database, repoints the table at the
    new collection name, rebuilds its rows, and updates the header text.
    """
    conn, c = database.connect()
    raw = database.fetch_collection(c, coll)
    # FIX: the connection was opened but never closed (leaked on every call).
    conn.close()
    self.dwc_left.tableView_downloaded.coll = coll
    self.dwc_left.tableView_downloaded.create_table([item[0] for item in raw])
    self.dwc_left.change_downloaded_text(coll)
def _set_playlist_table(self, coll_name):
    """Fill the playlist table with the recordings of *coll_name*."""
    conn, c = database.connect()
    collection = database.fetch_collection(c, coll_name)
    # FIX: the connection was opened but never closed (leaked on every call).
    conn.close()
    self.playlist_table.add_recordings(collection)