def addAuiTab(self, tabName, evidenceDetails):
    """Create and add the AUI notebook page named ``tabName``.

    Each supported tab name maps to a specific panel class; most tabs show a
    wx.ProgressDialog (via LoadingDialog) while the panel is being built.

    NOTE(review): reads the module-level ``caseDetails`` and writes the
    global ``caseDir`` -- presumably column 4 of each caseDetails row is the
    case directory; confirm against the caller.
    """
    global caseDir
    # caseDir ends up as column 4 of the LAST caseDetails row.
    for x in caseDetails:
        caseDir = x[4]
    if tabName == "Summary":
        self.auiNotebook.AddPage(
            SummaryTab.TabPanel(self.auiNotebook, caseDetails,
                                evidenceDetails), tabName, False,
            wx.NullBitmap)
    if tabName == "File":
        self._dialog = wx.ProgressDialog(
            "Loading", "Loading {tabName}".format(tabName=tabName),
            100)  #create loading dialog
        LoadingDialog(self._dialog)  #start loading
        self.auiNotebook.AddPage(
            pcapFilesTab.FilesTabPanel(self.auiNotebook, tabName, caseDir),
            tabName, False,
            wx.NullBitmap)  # adds the Files tab (pcapFilesTab panel)
        LoadingDialog.endLoadingDialog(self)  #stop loading
    if tabName == "Images":
        self._dialog = wx.ProgressDialog(
            "Loading", "Loading {tabName}".format(tabName=tabName),
            100)  #create loading dialog
        LoadingDialog(self._dialog)  #start loading
        self.auiNotebook.AddPage(
            ImagesTab.TabPanel(self.auiNotebook, tabName, caseDir), tabName,
            False, wx.NullBitmap)  # adds the Images tab (ImagesTab panel)
        LoadingDialog.endLoadingDialog(self)  #stop loading
    if tabName == "Sessions":
        self._dialog = wx.ProgressDialog(
            "Loading", "Loading {tabName}".format(tabName=tabName),
            100)  #create loading dialog
        LoadingDialog(self._dialog)  #start loading
        # Note: SessionsTabPanel does not take tabName, unlike its siblings.
        self.auiNotebook.AddPage(
            pcapSessionsTab.SessionsTabPanel(self.auiNotebook, caseDir),
            tabName, False,
            wx.NullBitmap)  # adds the Sessions tab (pcapSessionsTab panel)
        LoadingDialog.endLoadingDialog(self)  #stop loading
    if tabName == "DNS":
        self._dialog = wx.ProgressDialog(
            "Loading", "Loading {tabName}".format(tabName=tabName),
            100)  #create loading dialog
        LoadingDialog(self._dialog)  #start loading
        self.auiNotebook.AddPage(
            pcapDNSTab.DNSTabPanel(self.auiNotebook, caseDir), tabName,
            False, wx.NullBitmap)  # adds the DNS tab (pcapDNSTab panel)
        LoadingDialog.endLoadingDialog(self)  #stop loading
    if tabName == "Credentials":
        self._dialog = wx.ProgressDialog(
            "Loading", "Loading {tabName}".format(tabName=tabName),
            100)  #create loading dialog
        LoadingDialog(self._dialog)  #start loading
        self.auiNotebook.AddPage(
            pcapCredentialsTab.CredTabPanel(self.auiNotebook, tabName,
                                            caseDir), tabName, False,
            wx.NullBitmap)  # adds the Credentials tab (pcapCredentialsTab)
        LoadingDialog.endLoadingDialog(self)  #stop loading
    if tabName == "Bookmarks":
        self._dialog = wx.ProgressDialog(
            "Loading", "Loading {tabName}".format(tabName=tabName), 100)
        LoadingDialog(self._dialog)
        self.auiNotebook.AddPage(
            AnalyzedDataTab.TabPanel(self.auiNotebook, tabName,
                                     evidenceDetails, caseDir, caseDbPath),
            tabName, False,
            wx.NullBitmap)  # adds the Bookmarks tab (AnalyzedDataTab panel)
        LoadingDialog.endLoadingDialog(self)
    # TODO un-comment-out the following code once evidence exists properly
    # note: commented-out to allow File tab to be tested before database code added
    """for x in evidenceDetails:
def takeProjectedSlice(self, axes, projectionAxis, shouldTransform, order = 1):
    """Return a maximum-intensity projection along ``projectionAxis``,
    sliced according to ``axes``.

    axes: dict mapping axis index -> position for the slice taken through
        the projected data (axis 1 -- presumably time -- is never sliced
        here; confirm against takeSliceFromData's contract).
    projectionAxis: axis of self.imageArray to flatten with max().
    shouldTransform / order: forwarded to the slice/transform machinery.

    Returns a numpy array (dtype self.dtype).
    """
    if (projectionAxis == 2 or
            (numpy.all(self.alignParams[:, 3] == 0) and
             numpy.all(self.alignParams[:, 4] == 1))):
        # Scaling/rotation doesn't affect the projection; lucky us!
        data = self.imageArray.max(axis = projectionAxis)
        # Augment data with an extra dimension to replace the one we
        # flattened out.
        data = numpy.expand_dims(data, projectionAxis)
        # Since we flattened out this axis, change its index to be the only
        # possible valid index.
        axes[projectionAxis] = 0
        return self.takeSliceFromData(data, axes, shouldTransform, order)
    elif projectionAxis in [3, 4]:
        # Projecting through Y or X; just transform the local volume.
        dialog = wx.ProgressDialog(
                title = "Constructing projection",
                message = "Please wait...",
                maximum = self.size[0],
                style = wx.PD_AUTO_HIDE | wx.PD_REMAINING_TIME)
        curTimepoint = self.curViewIndex[1]
        data = []
        for wavelength in range(self.size[0]):
            # BUGFIX: honour the caller's ``order`` (was hard-coded to 1,
            # unlike the first branch which forwards it). Default unchanged.
            data.append(self.transformArray(
                    self.imageArray[wavelength, curTimepoint],
                    *self.alignParams[wavelength], order = order))
            dialog.Update(wavelength)
        data = numpy.array(data, dtype = self.dtype)
        dialog.Destroy()
        return data.max(axis = projectionAxis - 1)
    else:
        # Projecting through time; transform EVERY volume. Ouch.
        dialog = wx.ProgressDialog(
                title = "Constructing projection",
                message = "Please wait...",
                maximum = self.size[0] * self.size[1],
                style = wx.PD_AUTO_HIDE | wx.PD_REMAINING_TIME)
        data = []
        for timepoint in range(self.size[1]):
            timeData = []
            for wavelength in range(self.size[0]):
                # BUGFIX: honour the caller's ``order`` here too.
                volume = self.transformArray(
                        self.imageArray[wavelength, timepoint],
                        *self.alignParams[wavelength], order = order)
                timeData.append(volume)
                dialog.Update(timepoint * self.size[0] + wavelength)
            timeData = numpy.array(timeData, dtype = self.dtype)
            data.append(timeData)
        data = numpy.array(data, dtype = self.dtype)
        data = data.max(axis = 0)
        dialog.Destroy()
        # Slice through data per our axes parameter.
        # BUGFIX: renamed away from the builtin ``slice`` and index with a
        # tuple -- indexing an ndarray with a plain list of Ellipsis/ints is
        # deprecated (and now an error) in modern NumPy.
        sliceSpec = [Ellipsis] * 4
        for axis, position in axes.items():
            if axis != 1:
                sliceSpec[axis - 1] = position
        return data[tuple(sliceSpec)]
    # Unreachable: every branch above returns. Kept as a defensive guard.
    raise RuntimeError("Couldn't find a valid slice axis.")
def onBtnScan(self, evt):
    """Handle the Scan button: pick a file, hash it per the /Scanning
    config, fill the checksum text controls, and refresh URLs/links.

    Python 2 code (print statements). Side effects: mutates self.mlfile,
    self.filelist, self.filename, self.new_file, self.locked; calls
    self.update() at the end.
    """
    filename = wx.FileSelector("Choose a file to scan")
    if filename != "":
        # Read the user's checksum preferences from the wx config store.
        config = wx.ConfigBase.Get()
        config.SetPath("/Scanning")
        use_md5 = config.ReadBool("use_md5_checksums", use_md5_default)
        use_sha1 = config.ReadBool("use_sha1_checksums", use_sha1_default)
        use_sha256 = config.ReadBool("use_sha256_checksums",
                                     use_sha256_default)
        use_chunks = config.ReadBool("use_chunk_checksums",
                                     use_chunks_default)
        max_chunks = config.ReadInt("max_chunk_checksums",
                                    max_chunks_default)
        size_chunks = config.ReadInt("min_chunk_size", chunk_size_default)
        # Remember the previous filename so we can offer to rewrite URLs.
        old_filename = self.mlfile.filename
        progressdlg = wx.ProgressDialog(
            "Scanning file...",
            "Please wait while Metalink Editor scans the selected file. This can take some time for very large files.",
            100, self,
            wx.PD_AUTO_HIDE | wx.PD_APP_MODAL | wx.PD_CAN_ABORT
            | wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME
            | wx.PD_ESTIMATED_TIME)
        success = self.mlfile.scan_file(filename, use_chunks, max_chunks,
                                        size_chunks, progressdlg)
        progressdlg.Destroy()
        # Aborted or failed scan: leave the GUI untouched.
        if not success:
            return
        self.txtctrl_size.SetValue(str(self.mlfile.size))
        self.txtctrl_filename.SetValue(self.mlfile.filename)
        # Only show hashes the user opted into; blank otherwise.
        md5val = ""
        if use_md5:
            md5val = self.mlfile.hashlist['md5']
        self.txtctrl_md5.SetValue(md5val)
        sha1val = ""
        if use_sha1:
            sha1val = self.mlfile.hashlist['sha1']
        self.txtctrl_sha1.SetValue(sha1val)
        sha256val = ""
        if use_sha256:
            sha256val = self.mlfile.hashlist['sha256']
        self.txtctrl_sha256.SetValue(sha256val)
        if self.mlfile.hashlist['sha256'] == "":
            self.txtctrl_sha256.Enable(False)  # No support for SHA-256
        # Update URLs
        num_urls = self.filelist.GetItemCount()
        new_filename = self.mlfile.filename
        if num_urls > 0 and new_filename != old_filename:
            answer = wx.MessageBox(
                "Would you like to update your URLs, so that they use the new filename instead of the old?",
                "Update mirrors?", wx.ICON_QUESTION | wx.YES_NO, self)
            if answer == wx.YES:
                print "\nUpdating mirrors with the new filename."
                print "Changing", old_filename, "to", new_filename + "."
                item = -1
                while True:
                    item = self.filelist.GetNextItem(
                        item, wx.LIST_NEXT_ALL, wx.LIST_STATE_DONTCARE)
                    if item == -1:
                        break
                    # Replace the last path component of every URL with the
                    # new filename.
                    url = self.filelist.GetItem(item, 0).GetText()
                    #if old_filename == "":
                    url = os.path.dirname(url) + "/" + new_filename
                    self.filelist.SetStringItem(item, 0, url)
                    #else:
                    #    pos = url.rfind(old_filename)
                    #    if pos != -1:
                    #        print "Updated", url
                    #        url = url[:pos] + new_filename
                    #        self.filelist.SetStringItem(item, 0, url)
        # NOTE(review): this second pass deletes items while advancing
        # GetNextItem -- after a DeleteItem the indices shift, so the entry
        # following a deleted one may be skipped; confirm whether consecutive
        # ed2k/magnet rows are possible here.
        item = -1
        while True:
            item = self.filelist.GetNextItem(item, wx.LIST_NEXT_ALL,
                                             wx.LIST_STATE_DONTCARE)
            if item == -1:
                break
            url = self.filelist.GetItem(item, 0).GetText()
            if url.startswith("ed2k://"):
                # remove ed2k links, they add themselves back in later
                self.filelist.DeleteItem(item)
            if url.startswith("magnet:"):
                # remove magnet links, they add themselves back in later
                self.filelist.DeleteItem(item)
        # add ed2k and magnet link to GUI for this file
        if self.mlfile.ed2k != "":
            self.addurl(metalink.Resource(self.mlfile.ed2k))
        if self.mlfile.magnet != "":
            self.addurl(metalink.Resource(self.mlfile.magnet))
        self.filename = filename + ".metalink"
        self.new_file = True
        self.locked = True
        self.update()
def Run(self):
    """Entry point of the replicate-layout pcbnew action plugin.

    Flow: set up file logging in the project folder, require exactly one
    selected footprint (the pivot), build a Replicator, show the options
    dialog, then replicate the layout to the chosen sheets while a
    wx.ProgressDialog reports progress. Errors are shown in message boxes
    and logged to replicate_layout.log.
    """
    # load board
    board = pcbnew.GetBoard()
    # go to the project folder - so that log will be in proper place
    os.chdir(os.path.dirname(os.path.abspath(board.GetFileName())))
    # Remove all handlers associated with the root logger object.
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    # set up logger
    logging.basicConfig(
        level=logging.DEBUG,
        filename="replicate_layout.log",
        filemode='w',
        format='%(asctime)s %(name)s %(lineno)d:%(message)s',
        datefmt='%m-%d %H:%M:%S')
    logger = logging.getLogger(__name__)
    logger.info("Replicate layout plugin version: " + VERSION + " started")
    # Redirect stdout/stderr into the log so pcbnew console noise is kept.
    stdout_logger = logging.getLogger('STDOUT')
    sl_out = StreamToLogger(stdout_logger, logging.INFO)
    sys.stdout = sl_out
    stderr_logger = logging.getLogger('STDERR')
    sl_err = StreamToLogger(stderr_logger, logging.ERROR)
    sys.stderr = sl_err
    # Find the pcbnew top-level window to parent our dialogs to.
    _pcbnew_frame = [
        x for x in wx.GetTopLevelWindows()
        if x.GetTitle().lower().startswith('pcbnew')
    ][0]
    # check if there is exactly one module selected
    selected_modules = [
        x for x in pcbnew.GetBoard().GetModules() if x.IsSelected()
    ]
    selected_names = []
    for mod in selected_modules:
        selected_names.append("{}".format(mod.GetReference()))
    # if more or less than one show only a messagebox
    if len(selected_names) != 1:
        caption = 'Replicate Layout'
        message = "More or less than 1 footprints selected. Please select exactly one footprint and run the script again"
        dlg = wx.MessageDialog(_pcbnew_frame, message, caption,
                               wx.OK | wx.ICON_INFORMATION)
        dlg.ShowModal()
        dlg.Destroy()
        logging.shutdown()
        return
    # if exactly one module is selected
    # this is a pivot module
    pivot_module_reference = selected_names[0]
    # prepare the replicator
    logger.info("Preparing replicator with " + pivot_module_reference +
                " as a reference")
    try:
        replicator = replicatelayout.Replicator(board, self.update_progress)
    except LookupError as exception:
        caption = 'Replicate Layout'
        message = str(exception)
        dlg = wx.MessageDialog(_pcbnew_frame, message, caption,
                               wx.OK | wx.ICON_ERROR)
        dlg.ShowModal()
        dlg.Destroy()
        logging.shutdown()
        return
    except Exception:
        logger.exception(
            "Fatal error when making an instance of replicator")
        caption = 'Replicate Layout'
        message = "Fatal error when making an instance of replicator.\n"\
            + "You can raise an issue on GiHub page.\n" \
            + "Please attach the replicate_layout.log which you should find in the project folder."
        dlg = wx.MessageDialog(_pcbnew_frame, message, caption,
                               wx.OK | wx.ICON_ERROR)
        dlg.ShowModal()
        dlg.Destroy()
        logging.shutdown()
        return
    pivot_mod = replicator.get_mod_by_ref(pivot_module_reference)
    # NOTE(review): the "With filenames" placeholder is fed sheet_id twice;
    # pivot_mod.filename is logged under the wrong label.
    logger.info("Pivot footprint is %s\nLocated on:%s\nWith filenames:%s\nWith sheet_id:%s" \
                % (repr(pivot_mod.ref), repr(pivot_mod.sheet_id),
                   repr(pivot_mod.filename), repr(pivot_mod.sheet_id)))
    list_of_modules_with_same_id = replicator.get_list_of_modules_with_same_id(
        pivot_mod.mod_id)
    nice_list = [(x.ref, x.sheet_id) for x in list_of_modules_with_same_id]
    logger.info("Corresponding footprints are \n%s" % repr(nice_list))
    # NOTE(review): duplicate query -- same call as above; could reuse
    # list_of_modules_with_same_id.
    list_of_modules = replicator.get_list_of_modules_with_same_id(
        pivot_mod.mod_id)
    if not list_of_modules:
        caption = 'Replicate Layout'
        message = "Selected footprint is uniqe in the pcb (only one footprint with this ID)"
        dlg = wx.MessageDialog(_pcbnew_frame, message, caption,
                               wx.OK | wx.ICON_ERROR)
        dlg.ShowModal()
        dlg.Destroy()
        logging.shutdown()
        return
    # show dialog
    logger.info("Showing dialog")
    dlg = ReplicateLayoutDialog(_pcbnew_frame, replicator,
                                pivot_module_reference)
    res = dlg.ShowModal()
    # clear highlight on all modules on selected level
    for mod in dlg.pivot_modules:
        clear_highlight_on_module(mod)
    pcbnew.Refresh()
    if res == wx.ID_OK:
        selected_items = dlg.list_sheets.GetSelections()
        # NOTE(review): 'slected_names' (typo) is built but never used.
        slected_names = []
        for sel in selected_items:
            slected_names.append(dlg.list_sheets.GetString(sel))
        replicate_containing_only = not dlg.chkbox_intersecting.GetValue()
        remove_existing_nets_zones = dlg.chkbox_remove.GetValue()
        rep_tracks = dlg.chkbox_tracks.GetValue()
        rep_zones = dlg.chkbox_zones.GetValue()
        rep_text = dlg.chkbox_text.GetValue()
        rep_drawings = dlg.chkbox_drawings.GetValue()
    else:
        logger.info("User canceled the dialog")
        logging.shutdown()
        return
    # failsafe somtimes on my machine wx does not generate a listbox event
    level = dlg.list_levels.GetSelection()
    selection_indeces = dlg.list_sheets.GetSelections()
    sheets_on_a_level = replicator.get_sheets_to_replicate(
        pivot_mod, pivot_mod.sheet_id[level])
    sheets_for_replication = [
        sheets_on_a_level[i] for i in selection_indeces
    ]
    # check if all the anchor footprints are on the same layer as pivot footprint
    # first get all the anchor footprints
    all_sheet_footprints = []
    for sheet in sheets_for_replication:
        all_sheet_footprints.extend(replicator.get_modules_on_sheet(sheet))
    anchor_fp = [
        x for x in all_sheet_footprints if x.mod_id == pivot_mod.mod_id
    ]
    # then check if all of them are on the same layer
    if not all(fp.mod.IsFlipped() == pivot_mod.mod.IsFlipped()
               for fp in anchor_fp):
        caption = 'Replicate Layout'
        message = "Anchor footprints must be on the same layer as pivot footprint!"
        dlg = wx.MessageDialog(_pcbnew_frame, message, caption,
                               wx.OK | wx.ICON_ERROR)
        dlg.ShowModal()
        dlg.Destroy()
        logging.shutdown()
        return
    # replicate now
    logger.info("Replicating layout")
    self.start_time = time.time()
    self.last_time = self.start_time
    self.dlg = wx.ProgressDialog("Preparing for replication",
                                 "Starting plugin",
                                 maximum=100)
    self.dlg.Show()
    self.dlg.ToggleWindowStyle(wx.STAY_ON_TOP)
    try:
        replicator.replicate_layout(pivot_mod,
                                    pivot_mod.sheet_id[0:level + 1],
                                    sheets_for_replication,
                                    containing=replicate_containing_only,
                                    remove=remove_existing_nets_zones,
                                    tracks=rep_tracks,
                                    zones=rep_zones,
                                    text=rep_text,
                                    drawings=rep_drawings)
        logger.info("Replication complete")
        pcbnew.Refresh()
        logging.shutdown()
        self.dlg.Destroy()
    except LookupError as exception:
        caption = 'Replicate Layout'
        message = str(exception)
        dlg = wx.MessageDialog(_pcbnew_frame, message, caption,
                               wx.OK | wx.ICON_ERROR)
        dlg.ShowModal()
        dlg.Destroy()
        logging.shutdown()
        self.dlg.Destroy()
        return
    except Exception:
        logger.exception("Fatal error when running replicator")
        caption = 'Replicate Layout'
        message = "Fatal error when running replicator.\n"\
            + "You can raise an issue on GiHub page.\n" \
            + "Please attach the replicate_layout.log which you should find in the project folder."
        dlg = wx.MessageDialog(_pcbnew_frame, message, caption,
                               wx.OK | wx.ICON_ERROR)
        dlg.ShowModal()
        dlg.Destroy()
        logging.shutdown()
        self.dlg.Destroy()
        return
#Reduce verbosity experiment.exp.verbose = False experiment.exp.inst.verbose = False # create initial population pop = DetectorChoicePopulation() #Steps in calculation max = 100 if gui: import wx prog_dlg = wx.ProgressDialog( "Optimizing detector choice using Genetic Algorithm", "Starting...\n\n\n", max, style=wx.PD_CAN_ABORT | wx.PD_APP_MODAL | wx.PD_AUTO_HIDE, ) prog_dlg.SetSize(wx.Size(500, 200)) prog_dlg.Update(0) latest_avg = [] num_to_quit = 20 keep_going = True skipit = False count = 0 while keep_going: thebest = pop.best().fitness() theavg = pop.fitness()
def addAuiTab(self, tabName, evidenceDetails):
    """Create and add the AUI notebook page named ``tabName``.

    Fixed names ("Summary", "Deleted files", "Bookmarks") map to dedicated
    panels; names found in the analyzedDataTree/documentsTree/executablesTree
    module-level lists open AnalyzedDataTab panels; otherwise the name is
    matched against per-partition volume labels built from each evidence
    database.

    NOTE(review): reads the module-level caseDetails/caseDbPath and the tree
    lists, and writes the global caseDir -- presumably x[4] of caseDetails is
    the case directory; confirm against the caller.
    """
    global caseDir
    # caseDir ends up as column 4 of the LAST caseDetails row.
    for x in caseDetails:
        caseDir = x[4]
    if tabName == "Summary":
        self.auiNotebook.AddPage(
            SummaryTab.TabPanel(self.auiNotebook, caseDetails,
                                evidenceDetails), tabName, False,
            wx.NullBitmap)
    if tabName == "Deleted files":
        self._dialog = wx.ProgressDialog(
            "Loading", "Loading {tabName}".format(tabName=tabName),
            100)  #create loading dialog
        LoadingDialog(self._dialog)  #start loading
        self.auiNotebook.AddPage(
            DeletedFilesTab.TabPanel(self.auiNotebook, tabName, caseDir),
            tabName, False,
            wx.NullBitmap)  #calls and open a aui tab from DeletedFilesTab.py
        LoadingDialog.endLoadingDialog(self)  #stop loading
    if tabName == "Bookmarks":
        self._dialog = wx.ProgressDialog(
            "Loading", "Loading {tabName}".format(tabName=tabName), 100)
        LoadingDialog(self._dialog)
        self.auiNotebook.AddPage(
            AnalyzedDataTab.TabPanel(self.auiNotebook, tabName,
                                     evidenceDetails, caseDir, caseDbPath),
            tabName, False,
            wx.NullBitmap)  #calls and open a aui tab from SummaryTab.py
        LoadingDialog.endLoadingDialog(self)
    # Analyzed-data categories ("Deleted files" is excluded because it has
    # its own dedicated panel above).
    for x in analyzedDataTree:
        if tabName == x and tabName != "Deleted files":
            self._dialog = wx.ProgressDialog(
                "Loading", "Loading {tabName}".format(tabName=tabName), 100)
            LoadingDialog(self._dialog)
            addingPage = self.auiNotebook.AddPage(
                AnalyzedDataTab.TabPanel(self.auiNotebook, tabName,
                                         evidenceDetails, caseDir,
                                         caseDbPath), tabName, False,
                wx.NullBitmap)
            LoadingDialog.endLoadingDialog(self)
    # Document categories.
    for x in documentsTree:
        if tabName == x:
            self._dialog = wx.ProgressDialog(
                "Loading", "Loading {tabName}".format(tabName=tabName), 100)
            LoadingDialog(self._dialog)
            self.auiNotebook.AddPage(
                AnalyzedDataTab.TabPanel(self.auiNotebook, tabName,
                                         evidenceDetails, caseDir,
                                         caseDbPath), tabName, False,
                wx.NullBitmap)
            LoadingDialog.endLoadingDialog(self)
    # Executable categories.
    for x in executablesTree:
        if tabName == x:
            self._dialog = wx.ProgressDialog(
                "Loading", "Loading {tabName}".format(tabName=tabName), 100)
            LoadingDialog(self._dialog)
            self.auiNotebook.AddPage(
                AnalyzedDataTab.TabPanel(self.auiNotebook, tabName,
                                         evidenceDetails, caseDir,
                                         caseDbPath), tabName, False,
                wx.NullBitmap)
            LoadingDialog.endLoadingDialog(self)
    # Per-partition volume tabs, named from each evidence TSK database.
    for x in evidenceDetails:
        evidenceDbConn = connectdb.create_connection(
            x[2])  #connects to tsk database
        # NOTE(review): evidenceDbInfo is fetched but never used here.
        evidenceDbInfo = connectdb.select_image_info(
            evidenceDbConn)  #get name, size and md5 from tsk database
        evidencePart = connectdb.select_image_partitions(
            evidenceDbConn)  #get partition info from tsk database
        count = 0
        for i in evidencePart:
            count += 1
            # NOTE(review): the label's trailing ')' has no matching '(' --
            # looks like a typo, but the tab is only found if the caller
            # builds the name identically, so the literal is left untouched.
            if tabName == "Vol{count} {desc}: {start}-{end})".format(
                    count=count,
                    desc=str(i[2]),
                    start=str(i[0]),
                    end=str(i[1])):
                self._dialog = wx.ProgressDialog(
                    "Loading", "Loading {tabName}".format(tabName=tabName),
                    100)
                LoadingDialog(self._dialog)
                self.auiNotebook.AddPage(
                    AnalyzedDataTab.TabPanel(self.auiNotebook, tabName,
                                             evidenceDetails, caseDir,
                                             caseDbPath), tabName, False,
                    wx.NullBitmap)
                LoadingDialog.endLoadingDialog(self)
def Export(self):
    """Export the selected Library/Episode/Collection data to a Transana
    XML file (.tra/.xml), writing each record type in sequence while a
    wx.ProgressDialog reports progress. Python 2 code (print statements,
    <>, file(), unicode()).

    NOTE(review): several queries reference loop variables (seriesRec,
    episodeRec) OUTSIDE the loops that define them, and self.collectionNum
    is tested while self.collectNum is passed to execute() -- these look
    like leftovers of an in-progress rework; confirm before relying on the
    Episode/Collection/Clip export paths.
    """
    # use the LONGEST title here! That determines the size of the Dialog Box.
    progress = wx.ProgressDialog(
        _('Transana Selective Data Export'),
        _('Exporting Transcript records (This may be slow because of the size of Transcript records.)'),
        style = wx.PD_APP_MODAL | wx.PD_AUTO_HIDE)
    if progress.GetSize()[0] > 800:
        progress.SetSize((800, progress.GetSize()[1]))
        progress.Centre()
    db = DBInterface.get_db()
    try:
        # Get the user specified file name
        fs = self.XMLFile.GetValue()
        # Ensure that the file name has the proper extension
        if (fs[-4:].lower() != '.xml') and (fs[-4:].lower() != '.tra'):
            fs = fs + '.tra'
        # On the Mac, if no path is specified, the data is exported to a file INSIDE the application bundle,
        # where no one will be able to find it. Let's put it in the user's HOME directory instead.
        # I'm okay with not handling this on Windows, where it will be placed in the Program's folder
        # but it CAN be found. (There's no easy way on Windows to determine the location of "My Documents"
        # especially if the user has moved it.)
        if "__WXMAC__" in wx.PlatformInfo:
            # if the specified file has no path specification ...
            if fs.find(os.sep) == -1:
                # ... then prepend the HOME folder
                fs = os.getenv("HOME") + os.sep + fs
        # Open the output file for writing (Python 2 built-in file()).
        f = file(fs, 'w')
        progress.Update(0, _('Writing Headers'))
        # Importing Definitions from XMLExport
        XMLExportObject = XMLExport.XMLExport(self, -1, '')
        # Writing XML's DTD Headers
        XMLExportObject.WritingXMLDTD(f)
        # Writing Library Records
        progress.Update(9, _('Writing Library Records'))
        seriesList = []
        if db != None:
            dbCursor = db.cursor()
            if self.seriesNum <> 0:
                SQLText = """SELECT SeriesNum, SeriesID, SeriesComment, SeriesOwner, DefaultKeywordGroup FROM Series2 WHERE SeriesNum = %s"""
                dbCursor.execute(SQLText, self.seriesNum)
                if dbCursor.rowcount > 0:
                    f.write(' <SeriesFile>\n')
                XMLExportObject.WritingSeriesRecords(f, dbCursor)
                if dbCursor.rowcount > 0:
                    f.write(' </SeriesFile>\n')
            if self.episodeNum <> 0:
                SQLText = """SELECT a.SeriesNum, SeriesID, SeriesComment, SeriesOwner, DefaultKeywordGroup FROM Series2 a, Episodes b WHERE a.SeriesNum = b.SeriesNum AND b.EpisodeNum = %s"""
                dbCursor.execute(SQLText, self.episodeNum)
                if dbCursor.rowcount > 0:
                    f.write(' <SeriesFile>\n')
                XMLExportObject.WritingSeriesRecords(f, dbCursor)
                if dbCursor.rowcount > 0:
                    f.write(' </SeriesFile>\n')
            if self.collectionNum <> 0:
                SQLText = """SELECT a.SeriesNum, SeriesID, SeriesComment, SeriesOwner, DefaultKeywordGroup FROM Series2 a, Episodes b, Clip c, Collect d WHERE a.SeriesNum = b.SeriesNum AND b.EpisodeNum = c.EpisodeNum AND c.CollectNum = d.CollectNum AND d.CollectNum = %s"""
                # NOTE(review): tests self.collectionNum but passes
                # self.collectNum -- one of the two names is likely wrong.
                dbCursor.execute(SQLText, self.collectNum)
                if dbCursor.rowcount > 0:
                    f.write(' <SeriesFile>\n')
                XMLExportObject.WritingSeriesRecords(f, dbCursor)
                if dbCursor.rowcount > 0:
                    f.write(' </SeriesFile>\n')
            # Populating list with episodes
            for seriesRec in dbCursor.fetchall():
                if seriesRec not in seriesList:
                    seriesList.append(seriesRec[0])
            dbCursor.close()
        if DEBUG:
            print "Library Records:", seriesList
        # Writing Episode Records
        progress.Update(18, _('Writing Episode Records'))
        episodesList = []
        if db != None:
            dbCursor = db.cursor()
            SQLText = """SELECT EpisodeNum, EpisodeID, SeriesNum, TapingDate, MediaFile, EpLength, EpComment FROM Episodes2 WHERE SeriesNum = %s"""
            # episodes = FakeCursorObject()
            # NOTE(review): seriesRec here is whatever the PREVIOUS section's
            # fetchall() loop left behind -- a NameError if that loop never
            # ran, and at best only the last series; confirm intent.
            dbCursor.execute(SQLText, seriesRec[0])
            if dbCursor.rowcount > 0:
                f.write(' <EpisodeFile>\n')
            XMLExportObject.WritingEpisodeRecords(f, dbCursor)
            if dbCursor.rowcount > 0:
                f.write(' </EpisodeFile>\n')
            # Populating list with episodes
            for episodeRec in dbCursor.fetchall():
                if episodeRec not in episodesList:
                    episodesList.append(episodeRec[0])
            if DEBUG:
                print "Episode Records:", episodesList
            dbCursor.close()
        # Writing Core Data Records
        progress.Update(27, _('Writing Core Data Records'))
        if db != None:
            dbCursor = db.cursor()
            SQLText = """SELECT CoreDataNum, Identifier, Title, Creator, Subject, Description, Publisher, Contributor, DCDate, DCType, Format, Source, Language, Relation, Coverage, Rights FROM CoreData2 WHERE Identifier = 'Volume.mpg'"""
            dbCursor.execute(SQLText)
            if dbCursor.rowcount > 0:
                f.write(' <CoreDataFile>\n')
            XMLExportObject.WritingCoreDataRecords(f, dbCursor)
            if dbCursor.rowcount > 0:
                f.write(' </CoreDataFile>\n')
            dbCursor.close()
        # Writing Collection Records
        progress.Update(36, _('Writing Collection Records'))
        collectionsList = []
        if db != None:
            dbCursor = db.cursor()
            SQLText = """SELECT a.CollectNum, CollectID, ParentCollectNum, CollectComment, CollectOwner, a.DefaultKeywordGroup FROM Collections2 a, Clips2 b WHERE a.CollectNum = b.CollectNum AND b.EpisodeNum = %s"""
            collections = FakeCursorObject()
            # NOTE(review): episodeRec is again a leftover loop variable
            # from the Episode section above.
            dbCursor.execute(SQLText, episodeRec[0])
            # Populating list with episodes
            for collectionRec in dbCursor.fetchall():
                # Debug prints left in place (Python 2 print statements).
                print collectionRec
                print collectionsList
                print (collectionRec[0], collectionRec[1]) not in collectionsList
                print
                if (collectionRec[0], collectionRec[1]) not in collectionsList:
                    collectionsList.append((collectionRec[0], collectionRec[1]))
                collections.append(collectionRec)
            if dbCursor.rowcount > 0:
                f.write(' <CollectionFile>\n')
            XMLExportObject.WritingCollectionRecords(f, collections)
            if dbCursor.rowcount > 0:
                f.write(' </CollectionFile>\n')
            if DEBUG:
                print "Collection Recs:", collectionsList
            dbCursor.close()
        #Writing Clip Records
        progress.Update(45, _('Writing Clip Records'))
        clipsList = []
        if db != None:
            dbCursor = db.cursor()
            SQLText = """SELECT ClipNum, ClipID, CollectNum, a.EpisodeNum, a.MediaFile, ClipStart, ClipStop, ClipComment, SortOrder FROM Clips2 a, Episodes2 b WHERE a.EpisodeNum = %s"""
            # clips = FakeCursorObject()
            # NOTE(review): leftover episodeRec again -- see Episode section.
            dbCursor.execute(SQLText, episodeRec[0])
            if dbCursor.rowcount > 0:
                f.write(' <ClipFile>\n')
            XMLExportObject.WritingClipRecords(f, dbCursor)
            if dbCursor.rowcount > 0:
                f.write(' </ClipFile>\n')
            # Populating list with clips
            for clipRec in dbCursor.fetchall():
                if clipRec not in clipsList:
                    clipsList.append(clipRec[0])
            if DEBUG:
                print "Clip Recs:", clipsList
            dbCursor.close()
        #Writing Transcript Records
        transcriptsList = []
        progress.Update(54, _('Writing Transcript Records (This will seem slow because of the size of the Transcript Records.)'))
        if db != None:
            dbCursor = db.cursor()
            #Querying all transcripts based on Episodes
            SQLText1 = """SELECT TranscriptNum, TranscriptID, EpisodeNum, SourceTranscriptNum, ClipNum, SortOrder, Transcriber, ClipStart, ClipStop, Comment, RTFText FROM Transcripts2 WHERE EpisodeNum = %s AND ClipNum = 0"""
            #Querying all transcripts based on Clips
            SQLText2 = """SELECT TranscriptNum, TranscriptID, EpisodeNum, SourceTranscriptNum, ClipNum, SortOrder, Transcriber, ClipStart, ClipStop, Comment, RTFText FROM Transcripts2 WHERE ClipNum = %s"""
            if DEBUG:
                print "Selecting Transcripts"
            # transcripts = FakeCursorObject()
            # Adding all transcripts based on episodes to the list of all transcripts
            for episodeRec in episodesList:
                dbCursor.execute(SQLText1, episodeRec)
                for record in dbCursor.fetchall():
                    transcriptsList.append(record)
            # Adding all transcripts based on clips to the list of all transcripts
            for clipRec in clipsList:
                dbCursor.execute(SQLText2, clipRec)
                for record in dbCursor.fetchall():
                    transcriptsList.append(record)
            if DEBUG:
                print "%d Transcripts selected." % dbCursor.rowcount
            # NOTE(review): rowcount/dbCursor here reflect only the LAST
            # execute(); transcriptsList holds the accumulated rows, yet the
            # cursor (not the list) is passed to the writer -- confirm.
            if dbCursor.rowcount > 0:
                f.write(' <TranscriptFile>\n')
            #Writing all transcripts into the xml file
            XMLExportObject.WritingTranscriptRecords(f, dbCursor, progress)
            if dbCursor.rowcount > 0:
                f.write(' </TranscriptFile>\n')
            if DEBUG:
                print
                print transcriptsList
            dbCursor.close()
        # Collecting ClipKeyword data
        clipKeywordsList = []
        keywordsList = []
        # Collecting ClipKeywords from Episodes
        # Iterate through the Episode list...
        for episodeRec in episodesList:
            if DEBUG:
                print
                print "episodeRec =", episodeRec
            # ... add each episode's keyword to the episodeClipKeywords list...
            episodeClipKeywords = DBInterface.list_of_keywords(Episode = episodeRec)
            if DEBUG:
                print
                print "Episode ClipKeywords =", episodeClipKeywords
            # ... and add this list to the actual ClipKeyword list.
            for episodeClip in episodeClipKeywords:
                if episodeClip not in clipKeywordsList:
                    clipKeywordsList.append(episodeClip)
        for clipKeyword in clipKeywordsList:
            if DEBUG:
                print
                print clipKeyword[0], clipKeyword[1]
            # If the keyword isn't already in the Keyword List...
            if (clipKeyword[0], clipKeyword[1]) not in keywordsList:
                # ... add the keyword to the Keyword List.
                keywordsList.append((clipKeyword[0], clipKeyword[1]))
        if DEBUG:
            print
            print "Keword List =", keywordsList
        # Collecting ClipKeywords from Clips
        # Iterate through the Clip List...
        for clipRec in clipsList:
            if DEBUG:
                print
                print "clipRec =", clipRec
            # ... add each clip's keyword to the clipClipKeywords list
            clipClipKeywords = DBInterface.list_of_keywords(Clip = clipRec)
            if DEBUG:
                print
                print "Clip ClipKeywords =", clipClipKeywords
            # ... and add this to the actual ClipKeyword list.
            for clipClipKeyword in clipClipKeywords:
                if clipClipKeyword not in clipKeywordsList:
                    clipKeywordsList.append(clipClipKeyword)
        for clipKeyword in clipKeywordsList:
            if DEBUG:
                print
                print clipKeyword[0], clipKeyword[1]
            # If the keyword isn't already in the Keyword List...
            if (clipKeyword[0], clipKeyword[1]) not in keywordsList:
                # ... add the keyword to the Keyword List.
                keywordsList.append((clipKeyword[0], clipKeyword[1]))
        if DEBUG:
            print
            print "Keyword List =", keywordsList
        # Writing Keyword Records
        progress.Update(63, _('Writing Keyword Records'))
        if db != None:
            dbCursor = db.cursor()
            SQLText = """SELECT KeywordGroup, Keyword, Definition FROM Keywords2 WHERE KeywordGroup = %s AND Keyword = %s"""
            allKeywords = FakeCursorObject()
            for clipKeyword in keywordsList:
                dbCursor.execute(SQLText, (clipKeyword[0], clipKeyword[1]))
                for record in dbCursor.fetchall():
                    allKeywords.append(record)
            if dbCursor.rowcount > 0:
                f.write(' <KeywordFile>\n')
            XMLExportObject.WritingKeywordRecords(f, allKeywords)
            if dbCursor.rowcount > 0:
                f.write(' </KeywordFile>\n')
            dbCursor.close()
        # Writing ClipKeyword Records
        progress.Update(72, _('Writing Clip Keyword Records'))
        if db != None:
            dbCursor = db.cursor()
            SQLText = """SELECT EpisodeNum, ClipNum, KeywordGroup, Keyword, Example FROM ClipKeywords2 WHERE KeywordGroup = %s AND Keyword = %s"""
            allClipKeywords = FakeCursorObject()
            for clipKeyword in clipKeywordsList:
                dbCursor.execute(SQLText, (clipKeyword[0], clipKeyword[1]))
                for record in dbCursor.fetchall():
                    allClipKeywords.append(record)
            if dbCursor.rowcount > 0:
                f.write(' <ClipKeywordFile>\n')
            XMLExportObject.WritingClipKeywordRecords(f, allClipKeywords)
            if dbCursor.rowcount > 0:
                f.write(' </ClipKeywordFile>\n')
            dbCursor.close()
        # Collecting Note data
        notesList = []
        # Collecting NoteData from Library...
        for seriesRec in seriesList:
            seriesNotesList = DBInterface.list_of_notes(Library = seriesRec)
            if DEBUG:
                print
                print "Library Notes List =", seriesNotesList
            for seriesNote in seriesNotesList:
                # If the note isn't already in the notesList...
                if seriesNote not in notesList:
                    # ... add the note to the notesList.
                    notesList.append(seriesNote)
        # Collecting NoteData from Episodes
        # Iterate through the Episode list...
        for episodeRec in episodesList:
            # ... add each episode's note to the episodeNoteList...
            episodeNotesList = DBInterface.list_of_notes(Episode = episodeRec)
            if DEBUG:
                print
                print "Episode Note List =", episodeNotesList
            for episodeNote in episodeNotesList:
                # If the note isn't already in the Note List...
                if episodeNote not in notesList:
                    # ... add the note to the Note List.
                    notesList.append(episodeNote)
        # Collecting NoteData from Collections
        # Iterate through the Collection list...
        for collectionRec in collectionsList:
            # ... add each collection's note to the collectionNoteList...
            collectionNotesList = DBInterface.list_of_notes(Collection = collectionRec[0])
            for collectionNote in collectionNotesList:
                # If the note isn't already in the Note List...
                if collectionNote not in notesList:
                    # ... add the note to the Note List
                    notesList.append(collectionNote)
            if DEBUG:
                print
                print "Collection Note List =", collectionNotesList
        if DEBUG:
            print
            print "Note List =", notesList
        progress.Update(81, _('Writing Note Records'))
        if db != None:
            dbCursor = db.cursor()
            SQLText = """SELECT NoteNum, NoteID, SeriesNum, EpisodeNum, CollectNum, ClipNum, TranscriptNum, NoteTaker, NoteText FROM Notes2 WHERE NoteID = %s"""
            allNotes = FakeCursorObject()
            for note in notesList:
                dbCursor.execute(SQLText, note)
                for record in dbCursor.fetchall():
                    allNotes.append(record)
            if dbCursor.rowcount > 0:
                f.write(' <NoteFile>\n')
            XMLExportObject.WritingNoteRecords(f, allNotes)
            if dbCursor.rowcount > 0:
                f.write(' </NoteFile>\n')
            dbCursor.close()
        XMLExportObject.Destroy()
    except:
        if 'unicode' in wx.PlatformInfo:
            # Encode with UTF-8 rather than TransanaGlobal.encoding because this is a prompt, not DB Data.
            prompt = unicode(_('An error occurred during Selective Data Export.\n%s\n%s'), 'utf8')
        else:
            prompt = _('An error occurred during Selective Data Export.\n%s\n%s')
        errordlg = Dialogs.ErrorDialog(self, prompt % (sys.exc_info()[0], sys.exc_info()[1]))
        errordlg.ShowModal()
        errordlg.Destroy()
        if DEBUG:
            import traceback
            traceback.print_exc(file=sys.stdout)
        # NOTE(review): dbCursor/f may be unbound if the failure happened
        # before they were created -- this cleanup can itself raise.
        dbCursor.close()
    f.close()
    progress.Update(100)
    progress.Destroy()
def run_plugin(self, plugin_cls, **kwargs):
    """Run plugin of class plugin_cls on the current data set.

    The plugin executes in a separate process (started by
    mainmodel.run_plugin); this method polls it under a pulsing
    wx.ProgressDialog, replaces self.model.data with the plugin's output
    on success, and reports failures in modal error dialogs.

    BUGFIX: the original called ``wx.getApp().Yield()`` — wxPython has no
    ``getApp``; the correct spelling is ``wx.GetApp()``.  The old call
    raised AttributeError on every loop iteration.
    """
    cfg = None
    # Instantiate the plugin to see if it has a self.config dict
    # that should be configured by the user prior to execution.
    plugin_instance = plugin_cls()
    if hasattr(plugin_instance, "config"):
        cfg = self.configure_plugin_dlg(plugin_instance)
        if cfg is None:  # user cancelled the configuration dialog
            return
    try:
        plugin_process, plugin_queue, exception_queue = mainmodel.run_plugin(
            plugin_cls, self.data, cfg, **kwargs)
    except MemoryError:
        # Insufficient memory to run plugin with current data
        err_dlg = wx.MessageDialog(
            self.view,
            message="Insufficient memory to run plugin.",
            caption="Unable To Run Plugin",
            style=wx.ICON_ERROR)
        err_dlg.ShowModal()
        err_dlg.Destroy()
        return
    keepGoing = True
    try:
        progress_dlg = wx.ProgressDialog(
            "Running Plugin",
            "Please wait, executing plugin...",
            parent=self.view,
            style=wx.PD_CAN_ABORT)
        while keepGoing:
            wx.MilliSleep(125)
            (keepGoing, skip) = progress_dlg.UpdatePulse()
            try:
                if not plugin_process.is_alive():
                    # Catch low-level exceptions thrown by multiprocessing, such as
                    # MemoryError exceptions raised when attempting to send data
                    # through the queue.
                    # NOTE(review): a plugin that exits normally between polls may
                    # also hit this branch before its result is read — confirm.
                    module_logger.error(
                        "Unknown error occurred during plugin execution, plugin terminated"
                    )
                    err_msg = ' '.join([
                        "An unknown error has occurred running the plugin.",
                        "Please ensure your system has sufficient memory and disk space to process this data.",
                        "If the problem persists, please contact the plugin's author."
                    ])
                    err_dlg = wx.MessageDialog(
                        self.view,
                        message=err_msg,
                        caption="Unable To Run Plugin",
                        style=wx.ICON_ERROR)
                    err_dlg.ShowModal()
                    err_dlg.Destroy()
                    break
                # Raises Queue.Empty when the plugin has reported no error.
                exc_type, exc = exception_queue.get(block=False)
                err_str = str(exc)
                if len(err_str) == 0:
                    err_str = exc_type.__name__
                module_logger.error(
                    "Error occurred running plugin: {0}".format(err_str))
                err_msg = "An error occurred while running the plugin:\n{0}".format(
                    err_str)
                err_dlg = wx.MessageDialog(
                    self.view,
                    message=err_msg,
                    caption="Unable To Run Plugin",
                    style=wx.ICON_ERROR)
                err_dlg.ShowModal()
                err_dlg.Destroy()
                break
            except Queue.Empty:
                pass
            try:
                returned_data = plugin_queue.get(False)
            except Queue.Empty:
                continue
            if returned_data is not None:
                self.model.data = returned_data
                break
            if not keepGoing:  # user pressed Cancel on the progress dialog
                break
            # Keep the GUI responsive while polling (was wx.getApp(), a typo).
            wx.GetApp().Yield()
    finally:
        plugin_process.join()
        progress_dlg.Destroy()
def __init__(self, parent, properties=None, show_controls=True, size=(600, 600), loadData=True, **kwargs):
    """Build the Dimensionality Reduction Plot frame.

    Initializes the module-level globals (properties, DB connection,
    owning classifier), optionally loads per-object measurements behind a
    cancellable progress dialog, then lays out the score/loading figure
    notebook and its control panel.

    NOTE(review): show_controls is accepted but never used in this body.
    """
    wx.Frame.__init__(self, parent, -1, size=size, title='Dimensionality Reduction Plot', **kwargs)
    self.SetName('Plot main')

    # Bind the shared globals this module relies on.
    if properties is not None:
        global p
        p = properties
        if not p.is_initialized():
            logging.critical('Classifier requires a properties file. Exiting.')
            raise Exception('Classifier requires a properties file. Exiting.')
    global db
    db = DBConnect.getInstance()
    global classifier
    classifier = parent

    if loadData:
        # Cancellable progress dialog shown while measurements are fetched.
        progress = wx.ProgressDialog(
            'Fetching cell data...', '0% Complete', 100, classifier,
            wx.PD_ELAPSED_TIME | wx.PD_ESTIMATED_TIME | wx.PD_REMAINING_TIME
            | wx.PD_CAN_ABORT)

        def report(frac):
            # Progress callback; aborts the load if the user hits Cancel.
            still_going, _skip = progress.Update(int(frac * 100.),
                                                 '%d%% Complete' % (frac * 100.))
            if not still_going:
                progress.Destroy()
                raise StopCalculating()

        try:
            self.data, self.data_dic = self.load_obj_measurements(report)
        except StopCalculating:
            self.PostMessage('User canceled updating training set.')
            return
        progress.Destroy()
    else:
        self.data = None
        self.data_dic = None

    self.features_dic = self.load_feature_names()
    self.class_masks = None
    self.class_names = None
    self.object_opacity = None

    # Two figures in a notebook: component scores and loadings.
    notebook = PlotNotebook(self)
    self.figure_scores = notebook.add('Scores')
    self.figure_loadings = notebook.add('Loadings')
    self.update_figures()

    layout = wx.BoxSizer(wx.VERTICAL)
    layout.Add(notebook, 1, wx.EXPAND)
    controls = PlotControl(self, self.figure_scores, self.figure_loadings)
    layout.Add(controls, 0, wx.EXPAND | wx.ALL, 5)
    self.SetSizer(layout)
    self.Centre()
def merge_files(destination, sources, force_headless=False):
    """Merge measurement files in `sources` into a single `destination` file.

    Loads the pipeline from the first source, then appends each source's
    measurements into one destination Measurements object, offsetting
    image numbers so they do not collide.  When a GUI is available, shows
    a cancellable progress dialog; returns early (without writing) if the
    user cancels or the pipeline fails to load.

    BUGFIX: the original callback did ``has_error = True`` before
    ``has_error[0] = True``.  The plain assignment made ``has_error`` a
    *local* bool inside the closure, so subscripting it then raised
    TypeError whenever a load error actually occurred.  The shadowing
    assignment is removed; the ``[False]`` single-element list is the
    intended mutable flag shared with the enclosing scope.
    """
    is_headless = force_headless or get_headless()
    if not is_headless:
        import wx
    if len(sources) == 0:
        return
    if not is_headless:
        progress = wx.ProgressDialog(
            "Writing " + destination,
            "Loading " + sources[0],
            maximum=len(sources) * 4 + 1,
            style=wx.PD_CAN_ABORT | wx.PD_APP_MODAL | wx.PD_ELAPSED_TIME
            | wx.PD_REMAINING_TIME,
        )
    count = 0
    try:
        pipeline = cpp.Pipeline()
        # Mutable flag shared with the callback closure below.
        has_error = [False]

        def callback(caller, event):
            if isinstance(event, cpp.LoadExceptionEvent):
                wx.MessageBox(
                    message="Could not load %s: %s" % (sources[0], event.error),
                    caption="Failed to load %s" % sources[0],
                )
                has_error[0] = True

        pipeline.add_listener(callback)
        pipeline.load(sources[0])
        if has_error[0]:
            return
        # HDF5 destinations write incrementally; others are saved at the end.
        if destination.lower().endswith(".h5"):
            mdest = cpmeas.Measurements(filename=destination, multithread=False)
            h5_dest = True
        else:
            mdest = cpmeas.Measurements(multithread=False)
            h5_dest = False
        for source in sources:
            if not is_headless:
                count += 1
                keep_going, skip = progress.Update(count, "Loading " + source)
                if not keep_going:
                    return
            if h5py.is_hdf5(source):
                msource = cpmeas.Measurements(
                    filename=source, mode="r", multithread=False
                )
            else:
                msource = cpmeas.load_measurements(source)
            dest_image_numbers = mdest.get_image_numbers()
            source_image_numbers = msource.get_image_numbers()
            # Shift the incoming image numbers past those already present.
            if len(dest_image_numbers) == 0 or len(source_image_numbers) == 0:
                offset_source_image_numbers = source_image_numbers
            else:
                offset_source_image_numbers = (
                    np.max(dest_image_numbers)
                    - np.min(source_image_numbers)
                    + source_image_numbers
                    + 1
                )
            for object_name in msource.get_object_names():
                for feature in msource.get_feature_names(object_name):
                    if object_name == cpmeas.EXPERIMENT:
                        # Experiment-wide values: copy only if not already set.
                        if not mdest.has_feature(object_name, feature):
                            src_value = msource.get_experiment_measurement(feature)
                            mdest.add_experiment_measurement(feature, src_value)
                        continue
                    src_values = msource.get_measurement(
                        object_name, feature, image_set_number=source_image_numbers
                    )
                    mdest[
                        object_name, feature, offset_source_image_numbers
                    ] = src_values
        if not is_headless:
            keep_going, skip = progress.Update(
                count + 1, "Saving to " + destination
            )
            if not keep_going:
                return
        if not h5_dest:
            pipeline.save_measurements(destination, mdest)
    finally:
        if not is_headless:
            progress.Destroy()
def read_videos_for_length(self):
    """Select annotation frame indices for the current video and extract its frames as PNGs.

    Reads start/end frame and frame counts from the annotation panel's text
    boxes, picks random frame indices for manual and automatic annotation,
    writes them to *_index_annotation.txt / *_index_annotation_auto.txt, then
    decodes the video and saves frames [start, end) to <config dir>/<name>/.
    Sets self.error = 1 and returns early on invalid frame counts.
    """
    # self.video_list_with_address entries appear to end with a trailing
    # character (newline?) stripped via [:-1] — TODO confirm.
    self.cap = cv2.VideoCapture(self.video_list_with_address[self.index_video][:-1])
    success, image = self.cap.read()
    count = 0
    # get preferences in textbox annotation panel
    start = int(self.annotation.get_Text(1))
    end = int(self.annotation.get_Text(2))
    self.address = os.path.dirname(self.config)
    if not os.path.isdir(self.address + os.sep + self.name):
        # First extraction for this video: create the output dir and pick frames.
        os.mkdir(self.address + os.sep + self.name)
        num_frame_annotated = int(self.annotation.get_Text(3))
        num_frame_automatic = int(self.annotation.get_Text(4))
        my_list = list(range(start, end))  # list of integers from 1 to end
        # adjust this boundaries to fit your needs
        random.shuffle(my_list)
        if num_frame_annotated < (end - start):
            if self.import_from_deeplabcut_flag:
                # Reuse frame indices from an existing DeepLabCut annotation.
                frames = self.annotation.Get_annotation_for_deeplabcut_compat()
            else:
                frames = my_list[0:num_frame_annotated]
            if num_frame_automatic + num_frame_annotated < (end - start):
                frames_annotated = my_list[num_frame_annotated:num_frame_automatic + num_frame_annotated]
            else:
                wx.MessageBox('Please enter a number of frames to automatically annotate <= to the total number of selected frames (end-start)\n'
                              'Number of frame to annotate too high\n '
                              'User Input Error'
                              , 'Error!', wx.OK | wx.ICON_ERROR)
                self.error = 1
                return
        else:
            wx.MessageBox('Please enter a number of frames to annotate <= to the total number of selected frames (end-start)\n'
                          'Number of frame to annotate too high\n '
                          'User Input Error'
                          , 'Error!', wx.OK | wx.ICON_ERROR)
            self.error = 1
            return
        # Persist the chosen frame indices next to the annotation config.
        address = os.path.dirname(self.annotation.config)
        p = open(os.path.join(address, self.video_list_with_address[self.index_video][
            self.find(self.video_list_with_address[self.index_video], os.sep)[-1] + 1:-1] + '_index_annotation.txt'), 'w')
        p2 = open(os.path.join(address, self.video_list_with_address[self.index_video][
            self.find(self.video_list_with_address[self.index_video], os.sep)[-1] + 1:-1] + '_index_annotation_auto.txt'), 'w')
        for i in frames:
            p.writelines(str(i))
            p.writelines('\n')
        for i in frames_annotated:
            p2.writelines(str(i))
            p2.writelines('\n')
        p.close()
        p2.close()
    else:
        # Frames were extracted before: ask whether to redo from scratch.
        permission = wx.MessageBox(
            "Frame already extracted\n"
            "Do you want to extract frames again? (The procedure will delete previous frames",
            "Confirm", wx.YES_NO | wx.NO_DEFAULT, self)
        # NOTE(review): compares against the literal 2 — presumably wx.YES;
        # confirm and prefer the named constant.
        if permission == 2:
            shutil.rmtree(self.address + os.sep + self.name)
            try:
                os.remove(self.address + os.sep + "Annotation_" + self.video_list_with_address[self.index_video][
                    self.find(self.video_list_with_address[self.index_video], os.sep)[-1] + 1:-1] + '_' + self.scorer[:-1] + '.csv')
                os.remove(self.address + os.sep + "Annotation_" + self.video_list_with_address[self.index_video][
                    self.find(self.video_list_with_address[self.index_video], os.sep)[-1] + 1:-1] + '_' + self.scorer[:-1])
            except:
                # Best-effort cleanup of previous annotation files.
                pass
            os.mkdir(self.address + os.sep + self.name)
            # NOTE(review): this re-extraction branch duplicates the frame-picking
            # logic above verbatim — candidate for extraction into a helper.
            num_frame_annotated = int(self.annotation.get_Text(3))
            num_frame_automatic = int(self.annotation.get_Text(4))
            my_list = list(range(start, end))  # list of integers from 1 to 99
            # adjust this boundaries to fit your needs
            random.shuffle(my_list)
            if num_frame_annotated < (end - start):
                if self.import_from_deeplabcut_flag:
                    frames = self.annotation.Get_annotation_for_deeplabcut_compat()
                else:
                    frames = my_list[0:num_frame_annotated]
                if num_frame_automatic + num_frame_annotated < (end - start):
                    frames_annotated = my_list[num_frame_annotated:num_frame_automatic + num_frame_annotated]
                else:
                    wx.MessageBox(
                        'Please enter a number of frames to automatically annotate <= to the total number of selected frames (end-start)\n'
                        'Number of frame to annotate too high\n '
                        'User Input Error'
                        , 'Error!', wx.OK | wx.ICON_ERROR)
                    self.error = 1
                    return
            else:
                wx.MessageBox(
                    'Please enter a number of frames to annotate <= to the total number of selected frames (end-start)\n'
                    'Number of frame to annotate too high\n '
                    'User Input Error'
                    , 'Error!', wx.OK | wx.ICON_ERROR)
                self.error = 1
                return
            address = os.path.dirname(self.annotation.config)
            p = open(os.path.join(address,
                self.video_list_with_address[self.index_video][
                    self.find(self.video_list_with_address[self.index_video], os.sep)[-1] + 1:-1] + '_index_annotation.txt'), 'w')
            p2 = open(os.path.join(address, self.video_list_with_address[self.index_video][
                self.find(self.video_list_with_address[self.index_video], os.sep)[-1] + 1:-1] + '_index_annotation_auto.txt'), 'w')
            for i in frames:
                p.writelines(str(i))
                p.writelines('\n')
            for i in frames_annotated:
                p2.writelines(str(i))
                p2.writelines('\n')
            p.close()
            p2.close()
        else:
            return
    self.frames_id = np.asarray(range(start, end, 1)).astype(int)
    progress = wx.ProgressDialog("extraction in progress", "please wait", maximum=100, parent=self,
                                 style=wx.PD_SMOOTH | wx.PD_AUTO_HIDE)
    self.processSents()
    # Skip frames before `start`.
    while success and count < start:
        count += 1
        success, image = self.cap.read()
    # Save each frame in [start, end) as a zero-padded PNG.
    while success and count >= start and count < end:
        cv2.imwrite(self.address + os.sep + self.name + os.sep + 'frame_' + ("{:04d}".format(count)) + '.png', image)
        # NOTE(review): int(...) is applied before the *100, so this yields only
        # 0 or 100 — likely meant int(count / len(self.frames_id) * 100).
        progress.Update(int(count / len(self.frames_id)) * 100)
        count += 1
        success, image = self.cap.read()
    progress.Destroy()
    wx.Yield()
    self.cap.release()
def XValidate(self, nPermutations):
    """Cross-validate the SVM classifier and collect misclassifications.

    Grid-searches C and gamma on the full training set, then repeats a
    5-fold (4 train / 1 test) cross-validation `nPermutations` times with
    freshly shuffled subsets, recording every misclassified label per
    training-set object.

    Returns:
        list[list]: for each training object, the labels it was wrongly
        assigned across all permutations/folds.
    """
    # Make sure all data is available in the training set
    if not self.classifier.UpdateTrainingSet():
        return
    # Initialize process dialog
    def cb(frac):
        # Progress callback shared by both phases; raises to abort on Cancel.
        cont, skip = dlg.Update(int(frac * 100.), '%d%% Complete' % (frac * 100.))
        if not cont:  # Cancel was pressed
            dlg.Destroy()
            raise StopCalculating()
    dlg = wx.ProgressDialog(
        'Performing grid search for optimal parameters...', '0% Complete', 100,
        self.classifier, wx.PD_ELAPSED_TIME | wx.PD_ESTIMATED_TIME
        | wx.PD_REMAINING_TIME | wx.PD_CAN_ABORT)
    # Define cross validation parameters
    totalGroups = 5
    trainingGroups = 4
    # Convert the training set into SVM format and search for optimal parameters
    # C and gamma using 5-fold cross-validation
    logging.info(
        'Performing grid search for parameters C and gamma on entire training set...'
    )
    self.TranslateTrainingSet(self.classifier.trainingSet.label_matrix,
                              self.classifier.trainingSet.values)
    C, gamma = self.ParameterGridSearch(callback=cb)
    dlg.Destroy()
    logging.info(
        'Grid search completed. Found optimal C=%d and gamma=%f.'
        % (C, gamma))
    # Create the classifier and initialize misclassification storage
    # NOTE(review): modern scikit-learn's SVC has no `eps` parameter (the
    # tolerance is `tol`) — this line presumably targets an old sklearn;
    # confirm against the pinned version.
    classifier = Pipeline([
        ('anova', feature_selection.SelectPercentile(feature_selection.f_classif,
                                                     percentile=self.percentile)),
        ('svc', SVC(kernel='rbf', C=C, gamma=gamma, eps=0.1))
    ])
    nObjects = self.classifier.trainingSet.label_matrix.shape[0]
    # NOTE(review): np.ceil returns a float; using it (and lastGroupStart
    # below) as a slice index raises TypeError on modern Python/NumPy —
    # an int(...) cast looks intended.
    subsetSize = np.ceil(nObjects / float(totalGroups))
    indices = np.arange(nObjects)
    misclassifications = [[] for i in range(nObjects)]
    # Create group combinations and arrays of all labels and values
    dt = ','.join('i' * trainingGroups)
    trainingTotalGroups = list(
        np.fromiter(combinations(range(totalGroups), trainingGroups),
                    dtype=dt, count=-1))
    #trainingTotalGroups = list(combinations(range(totalGroups), trainingGroups))
    allLabels = np.array(self.svm_train_labels)
    allValues = np.array(self.svm_train_values)
    # For all permutations of the subsets train the classifier on 4 totalGroups and
    # classify the remaining group for a number of random subsets
    logging.info('Calculating average classification accuracy %d times over a ' \
                 '%0.1f%%/%0.1f%% cross-validation process' % \
                 (nPermutations, trainingGroups/float(totalGroups)*100, \
                 (1-trainingGroups/float(totalGroups))*100))
    dlg = wx.ProgressDialog(
        'Calculating average cross-validation accuracy...', '0% Complete', 100,
        self.classifier, wx.PD_ELAPSED_TIME | wx.PD_ESTIMATED_TIME
        | wx.PD_REMAINING_TIME | wx.PD_CAN_ABORT)
    nTrainingTotalGroups = len(trainingTotalGroups)
    nOperations = float(nPermutations * nTrainingTotalGroups)
    for per in range(nPermutations):
        # Split the training set into subsets
        np.random.shuffle(indices)
        lastGroupStart = (totalGroups - 1) * subsetSize
        subsets = np.hsplit(indices[0:lastGroupStart], (totalGroups - 1))
        subsets.append(indices[lastGroupStart:], )
        for index, group in enumerate(trainingTotalGroups):
            # Retrieve indices of all objects in the training set
            trainingSet = np.hstack(
                [subsets[i] for i in range(totalGroups) if i in group])
            # Train a classifier on the subset
            classifier.fit(allValues[trainingSet], allLabels[trainingSet])
            # Predict the test set using the trained classifier
            testSet = np.hstack(
                [subsets[i] for i in range(totalGroups) if i not in group])
            testLabels = classifier.predict(allValues[testSet])
            # Store all misclassifications (list comprehension used purely
            # for its append side effects).
            [misclassifications[testSet[i]].append(testLabels[i]) \
             for i in range(len(testLabels)) \
             if testLabels[i] != allLabels[testSet][i]]
            # Update progress dialog
            cb((nTrainingTotalGroups * per + index) / nOperations)
    # Calculate average classification accuracy
    dlg.Destroy()
    logging.info('Average Classification Accuracy: %f%%' % \
                 ((1-len([item for sublist in misclassifications for item in sublist]) /\
                 float(nObjects * nPermutations))*100))
    return misclassifications
def on_button(self, event):
    """Handle the Sample button: run the pipeline once per parameter sample.

    Confirms the sample count with the user, prompts for a .tsv output
    path, then for each generated parameter sample applies the values to
    the target module, re-runs the pipeline up to that module, and saves
    one output row.  Original parameter values are restored and the
    window is closed when all runs complete.
    """
    if event.GetId() == ID_SAMPLE_BUTTON:
        # try:
        self.validate_input_ranges()
        number = self.calc_number_samples()
        sample_dialog = wx.MessageDialog(
            self,
            "Proceed with calculating " + str(number) + " samples?",
            caption="Confirm sample size",
            style=wx.ICON_QUESTION | wx.OK | wx.CANCEL,
        )
        if sample_dialog.ShowModal() == wx.ID_OK:
            save_dialog = wx.FileDialog(
                event.GetEventObject(),
                message="Save sampled output",
                wildcard="Tab separated values (*.tsv)|*.tsv",
                style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT,
            )
            if save_dialog.ShowModal() == wx.ID_OK:
                # 1. Copy original parameter values (restored in step 6)
                original_values = []
                for setting in self.__module.visible_settings():
                    original_values.append(setting.get_value())
                # 2. Sample input parameters
                self.__sample_list = self.generate_parameter_samples()
                # 3. Open output file and write headers
                # NOTE(review): output_file is not closed if a run below
                # raises — consider a with-block or try/finally.
                output_file = open(save_dialog.GetPath(), "w")
                headers = ""
                for i, setting in enumerate(
                        self.__module.visible_settings()):
                    # Do not write settings without values, ie, buttons etc
                    if setting.get_text() != "":
                        headers += setting.get_text()
                        if i < len(self.__module.visible_settings()) - 1:
                            headers += "\t"
                headers += "\n"
                output_file.write(headers)
                # 4. Run pipeline once for each sample
                # ~*~
                self.Show(False)
                progressDialog = wx.ProgressDialog(
                    parent=self,
                    title="Sampling parameters",
                    message="Run 1",
                    maximum=len(self.__sample_list),
                )
                # Widen the dialog so long run messages fit.
                size = progressDialog.GetSize()
                size.SetWidth(2 * size.GetWidth())
                progressDialog.SetSize(size)
                # ~^~
                for i, l in enumerate(self.__sample_list):
                    # print '\nStarting run ' + str(i+1) + '...'
                    # Apply this sample's values to the module's settings.
                    for j, value in enumerate(l):
                        if value is not None:
                            setting_nr = self.__parameters_list[j][1]
                            setting = self.__module.setting(setting_nr)
                            setting.set_value(value)
                            # print str(setting.get_text()) + ' -> ' + str(setting.get_value())
                    # ~*~
                    progressDialog.Update(
                        i + 1,
                        newmsg="Executing run " + str(i + 1) + " of "
                        + str(len(self.__sample_list)),
                    )
                    # ~^~
                    # It's not very efficient to run the complete pipeline
                    # when only the last module's parameter values are
                    # different. However, this is the only way I can get
                    # the images to update correctly for the last module. Ie,
                    # when I don't prepare and run the pipeline from scratch
                    # for every different configuration of the last module,
                    # I get the images generated by the first run for every
                    # run.
                    # 4.1 Prepare to run pipeline
                    self.prepare_for_run()
                    # 4.2 Run modules up to and including the sampled module
                    for module in self.__pipeline.modules():
                        if (module.get_module_num() <=
                                self.__module.get_module_num()):
                            self.run_module(module)
                    # 4.3 Save output
                    self.save_run_output(i, save_dialog.GetDirectory(),
                                         output_file)
                    # This is the way to run headless, if only I could get at the images...
                    # self.stop_now = False
                    # running_pipeline = self.__pipeline.run_with_yield(
                    #     run_in_background=False,
                    #     status_callback=self.status_callback)
                    # while not self.stop_now:
                    #     measurements = running_pipeline.next()
                    # print '...run completed.'
                # 5. Close output file
                output_file.close()
                # ~*~
                progressDialog.Destroy()
                # ~^~
                # 6. Set parameters back to original values and close window
                for i, setting in enumerate(
                        self.__module.visible_settings()):
                    setting.set_value(original_values[i])
                self.Close(True)
def CopyImages(self):
    """Move saved PNG/RAW images into a per-patient, timestamped archive folder.

    Copies every file from ROOTPATH/SavePNG and ROOTPATH/SaveRaw into
    ROIPATH/SavedImages/<patient>/<timestamp>/{SavePNG,SaveRaw}, deleting
    each source file once its copy is verified, with a progress dialog.
    """
    self.MyLog("Calling CopyImages", str(self.CurrentPatientID))
    AllPNGFiles = os.listdir(
        os.path.join(self.SettingsObj.ROOTPATH, "SavePNG"))
    AllRAWFiles = os.listdir(
        os.path.join(self.SettingsObj.ROOTPATH, "SaveRaw"))
    mess = "Transfering images ..."
    Nb = len(AllPNGFiles) + len(AllRAWFiles)
    # NOTE(review): `and` binds tighter than `or`, so the patient-ID checks
    # only guard the RAW clause; the intent was presumably
    # (len(AllPNGFiles) > 2 or len(AllRAWFiles) > 2) and <patient checks> —
    # confirm before changing.
    if len(AllPNGFiles) > 2 or len(
            AllRAWFiles
    ) > 2 and self.CurrentPatientID != 'none' and self.CurrentPatientID != 'ID':
        dialog = wx.ProgressDialog("Clinical", mess, Nb,
                                   style=wx.PD_ELAPSED_TIME
                                   | wx.PD_REMAINING_TIME | wx.PD_AUTO_HIDE)
        count = 0
        # Ensure SavedImages/<patient>/<timestamp> exists.
        if os.path.isdir(
                os.path.join(self.SettingsObj.ROIPATH, "SavedImages")) == False:
            os.mkdir(os.path.join(self.SettingsObj.ROIPATH, "SavedImages"))
        if os.path.isdir(
                os.path.join(self.SettingsObj.ROIPATH, "SavedImages",
                             self.CurrentPatientID)) == False:
            os.mkdir(
                os.path.join(self.SettingsObj.ROIPATH, "SavedImages",
                             self.CurrentPatientID))
        # Timestamp tag, e.g. 2024-1-7_13h5m9s (components are not zero-padded).
        DT = datetime.now()
        DTTag = str(DT.year) + "-" + str(DT.month) + "-" + str(
            DT.day) + "_" + str(DT.hour) + "h" + str(
                DT.minute) + "m" + str(DT.second) + "s"
        if not os.path.isdir(
                os.path.join(self.SettingsObj.ROIPATH, "SavedImages",
                             self.CurrentPatientID, DTTag)):
            os.mkdir(
                os.path.join(self.SettingsObj.ROIPATH, "SavedImages",
                             self.CurrentPatientID, DTTag))
        self.MyLog(
            "CopyImages",
            str(
                os.path.join(self.SettingsObj.ROIPATH, "SavedImages",
                             self.CurrentPatientID, DTTag)))
        if self.SettingsObj.SAVEPNG:
            if not os.path.isdir(
                    os.path.join(self.SettingsObj.ROIPATH, "SavedImages",
                                 self.CurrentPatientID, DTTag, "SavePNG")):
                os.mkdir(
                    os.path.join(self.SettingsObj.ROIPATH, "SavedImages",
                                 self.CurrentPatientID, DTTag, "SavePNG"))
            # Copy each PNG, then delete the source only if the copy exists.
            # NOTE(review): the bare except silently abandons the transfer
            # mid-way — at minimum the error should be logged.
            try:
                for r, d, f in os.walk(
                        os.path.join(self.SettingsObj.ROOTPATH, "SavePNG")):
                    for file in f:
                        shutil.copy(
                            os.path.join(self.SettingsObj.ROOTPATH,
                                         "SavePNG", file),
                            os.path.join(self.SettingsObj.ROIPATH,
                                         "SavedImages",
                                         self.CurrentPatientID, DTTag,
                                         "SavePNG", file))
                        if os.path.isfile(
                                os.path.join(self.SettingsObj.ROIPATH,
                                             "SavedImages",
                                             self.CurrentPatientID,
                                             DTTag, "SavePNG", file)):
                            os.remove(
                                os.path.join(self.SettingsObj.ROOTPATH,
                                             "SavePNG", file))
                        count += 1
                        dialog.Update(count)
            except:
                pass
        if self.SettingsObj.SAVERAW:
            if not os.path.isdir(
                    os.path.join(self.SettingsObj.ROIPATH, "SavedImages",
                                 self.CurrentPatientID, DTTag, "SaveRaw")):
                os.mkdir(
                    os.path.join(self.SettingsObj.ROIPATH, "SavedImages",
                                 self.CurrentPatientID, DTTag, "SaveRaw"))
            # Same copy-verify-delete loop for the RAW images.
            try:
                for r, d, f in os.walk(
                        os.path.join(self.SettingsObj.ROOTPATH, "SaveRaw")):
                    for file in f:
                        shutil.copy(
                            os.path.join(self.SettingsObj.ROOTPATH,
                                         "SaveRaw", file),
                            os.path.join(self.SettingsObj.ROIPATH,
                                         "SavedImages",
                                         self.CurrentPatientID, DTTag,
                                         "SaveRaw", file))
                        if os.path.isfile(
                                os.path.join(self.SettingsObj.ROIPATH,
                                             "SavedImages",
                                             self.CurrentPatientID,
                                             DTTag, "SaveRaw", file)):
                            os.remove(
                                os.path.join(self.SettingsObj.ROOTPATH,
                                             "SaveRaw", file))
                        count += 1
                        dialog.Update(count)
            except:
                pass
        dialog.Destroy()
def OnStart(self, event):
    """Validate inputs, gather planet longitudes, and start the chart search.

    Reads each planet's longitude (degrees, plus optional arc-minutes /
    arc-seconds) and retrograde flag from the dialog controls, packs them
    with the Asc/MC and approximation settings, then launches calcCharts
    on a worker thread with a cancellable progress dialog and a polling
    timer.

    Improvement over the original: the seven identical deg/min/sec
    accumulations are factored into one local helper, and `!= None` is
    replaced with the idiomatic `is not None`.  Behavior is unchanged.
    """
    if (not self.Validate()):
        return
    res = self.li.OnStart()
    if not res:
        return
    if not self.checkAsc():
        return
    self.bc = self.bcckb.GetValue()
    # Whether the optional minute/second fields contribute to longitudes.
    usemin = self.useminckb.GetValue()
    usesec = self.usesecckb.GetValue()

    def lon_from(degctrl, minctrl, secctrl):
        # One body's longitude in decimal degrees from its text controls.
        lon = float(degctrl.GetValue())
        if usemin:
            lon += float(minctrl.GetValue()) / 60.0
        if usesec:
            lon += float(secctrl.GetValue()) / 3600.0
        return lon

    # (longitude, retrograde) per body; Sun and Moon are never retrograde.
    ftdata = ((lon_from(self.sundeg, self.sunmin, self.sunsec), False),
              (lon_from(self.moondeg, self.moonmin, self.moonsec), False),
              (lon_from(self.mercurydeg, self.mercurymin, self.mercurysec),
               self.mercuryretr.GetValue()),
              (lon_from(self.venusdeg, self.venusmin, self.venussec),
               self.venusretr.GetValue()),
              (lon_from(self.marsdeg, self.marsmin, self.marssec),
               self.marsretr.GetValue()),
              (lon_from(self.jupiterdeg, self.jupitermin, self.jupitersec),
               self.jupiterretr.GetValue()),
              (lon_from(self.saturndeg, self.saturnmin, self.saturnsec),
               self.saturnretr.GetValue()))
    useascmc = self.useascmcckb.GetValue()
    # Asc/MC always use full deg+min+sec, regardless of the usemin/usesec toggles.
    asclon = float(self.ascdeg.GetValue()) + float(self.ascmin.GetValue()) / 60.0 + float(self.ascsec.GetValue()) / 3600.0
    mclon = float(self.mcdeg.GetValue()) + float(self.mcmin.GetValue()) / 60.0 + float(self.mcsec.GetValue()) / 3600.0
    useapprox = self.useapproxckb.GetValue()
    approxdeg = float(self.approxdeg.GetValue())
    approxmin = float(self.approxmin.GetValue())
    approxsec = float(self.approxsec.GetValue())
    useretr = self.useretrckb.GetValue()
    ftdatause = (usemin, usesec, useretr)
    ftdataascmc = (useascmc, asclon, mclon)
    ftdataapprox = (useapprox, approxdeg, approxmin, approxsec)
    ##########
    self.suffix = ''
    self.progtxt = mtexts.txts['BusyInfo2']
    self.progbar = wx.ProgressDialog(mtexts.txts['Calculating'],
                                     self.progtxt + '\n', parent=self,
                                     style=wx.PD_CAN_ABORT | wx.PD_APP_MODAL | wx.PD_ELAPSED_TIME)
    self.progbar.Fit()
    self.btnShow.Enable(False)
    self.found = False
    # Drop any result array from a previous search before starting anew.
    if self.ar is not None:
        del self.ar
    self.ar = None
    self.ftready = False
    self.abort = AbortFindTime()
    thId = thread.start_new_thread(self.calcCharts,
                                   (self.bc, ftdata, ftdatause, ftdataascmc,
                                    ftdataapprox, self))
    # Poll the worker every 500 ms via OnTimer.
    self.timer = wx.Timer(self)
    self.Bind(wx.EVT_TIMER, self.OnTimer)
    self.timer.Start(500)
def InstallPluginFromFile(self, filenames):
    """Install one or more plugins from zip archives chosen by the user.

    For each archive: reads its plugin.xml, skips versions that are
    already installed (collecting them for a single summary message),
    removes older versions, then — after one confirmation dialog —
    extracts each remaining plugin, registers it with the plugin manager
    (keeping the manager's parallel name/enabled/states/restart/listbox
    lists in sync by sorted index), and refreshes the plugin listbox.
    Plugins whose manifest requests a restart are marked state 6 instead
    of being activated immediately.
    """
    info = []
    try:
        installed = []
        for filename in filenames:
            wzip = zipfile.ZipFile(filename, 'r')
            # plugin.xml lives inside the archive's single top-level folder.
            xml = wzip.open("%splugin.xml" % wzip.namelist()[0], 'r')
            root = ElementTree.fromstring(xml.read())
            xml.close()
            # root[0]=display name, root[1]=version (by position — assumed
            # manifest layout; root[2]=description, root[3]=restart flag below).
            name = root[0].text.replace(" ", "_")
            if name in self._manager.names:
                item = self._manager.names.index(name)
                # states < 3 or == 6: plugin files live in the user data dir;
                # otherwise the pending copy sits in a temp dir.
                if self._manager.states[item] < 3 or self._manager.states[
                        item] == 6:
                    root2 = ElementTree.parse(
                        os.path.join(self._parent._app.userdatadir,
                                     "plugins", name, "plugin.xml")).getroot()
                else:
                    root2 = ElementTree.parse(
                        os.path.join(wx.StandardPaths.Get().GetTempDir(),
                                     self._manager.tempdirs[name],
                                     "plugin.xml")).getroot()
                if root2[1].text == root[1].text:
                    # Same version already present: report or re-link, skip it.
                    if self._manager.states[
                            item] < 3 or self._manager.states[item] == 6:
                        installed.append("%s %s" %
                                         (root2[0].text, root2[1].text))
                    else:
                        self.HandleLinkEvent(name, 5, False)
                    wzip.close()
                    continue
                else:
                    # Different version: remove the old install and its rows.
                    shutil.rmtree(
                        os.path.join(self._parent._app.userdatadir,
                                     "plugins", name))
                    self._manager.states[item] = 3
                    self._manager.RemovePlugin(name)
                    self.restart.pop(item)
                    self.listbox.items.pop(item)
            info.append((wzip, root, name))
        if len(installed):
            installed = "\n".join([" " * 4 + name for name in installed])
            wx.MessageBox(
                _("The following plugins are already installed:\n\n%s") %
                installed, _("Plugin Manager"),
                wx.ICON_EXCLAMATION | wx.OK, self)
        if not len(info):
            return
        names = "\n".join([" " * 4 + item[1][0].text for item in info])
        install = wx.MessageBox(
            _("Are you sure you want to install the following plugins?\n\n%s"
              ) % names, _("Plugin Manager"), wx.ICON_QUESTION | wx.YES_NO,
            self)
        if install != wx.YES:
            return
        plugindir = os.path.join(self._parent._app.userdatadir, "plugins")
        # Count trailing disabled entries so new names sort among the
        # enabled portion of the list only.
        disabled = 0
        for i in range(len(self._manager.names) - 1, -1, -1):
            if self._manager.enabled[i] or self._manager.states[i] == 1:
                break
            disabled += 1
        names = self._manager.names[:]
        if disabled > 0:
            names = names[:-disabled]
        if len(info) > 1:
            dialog = wx.ProgressDialog("Write++", "", len(info))
            i = 0
        for wzip, root, name in info:
            if len(info) > 1:
                dialog.Update(i, _("Installing %s...") % root[0].text)
                i += 1
            wzip.extractall(plugindir)
            # Insert at the alphabetically sorted position.
            names.append(name)
            names.sort()
            item = names.index(name)
            self._manager.names.insert(item, name)
            self._manager.enabled.insert(item, True)
            restart = root[3].text == "True"
            self._manager.LoadPlugin(name, not restart, item)
            # HTML row shown in the plugin listbox.
            text = "<font><b>%s</b> %s<br>%s</font><div align=right>" % (
                root[0].text, root[1].text, root[2].text)
            if hasattr(self._manager.plugins[item], "OnInstall"):
                self._manager.plugins[item].OnInstall()
            if restart:
                text2 = _(
                    "<font color=green>This plugin will be installed when you restart Write++. <a href=';4'>Restart Now</a> <a href='%s;5'>Undo</a></font><br>"
                ) % name
                text = text2 + text[:text.index("<div align=right>")]
                self._manager.states.insert(
                    item, 6)  # 6 = will be installed on restart
            else:
                # Activate immediately: notify open frames and enable hooks.
                if hasattr(self._manager.plugins[item], "OnNewFrame"):
                    for frame in self._manager._app.frames:
                        self._manager.plugins[item].OnNewFrame(frame)
                if hasattr(self._manager.plugins[item], "OnEnable"):
                    self._manager.plugins[item].OnEnable()
                if hasattr(self._manager.plugins[item], "OnOptions"):
                    text += _("<a href='%s;0'>Options</a> ") % name
                text += _("<a href='%s;1'>Disable</a> ") % name + _(
                    "<a href='%s;3'>Remove</a>") % name + " </div>"
                self._manager.states.insert(item, 0)
            self.listbox.items.insert(item, text)
            self.restart.insert(item, restart)
        self.listbox.SetItemCount(len(self.listbox.items))
        self.listbox.Refresh()
        self.listbox.Update()
        if len(info) > 1:
            dialog.Destroy()
    finally:
        # Close every archive that was queued for install, even on error.
        for item in info:
            item[0].close()
def Calculation(self):
    """Apply a mathematical-morphology multiple-test correction to all ANOVA and PostHoc results.

    For every result table (GFP / all electrodes, Anova / PostHoc), builds a
    binary mask of p-values below the user's alpha threshold and applies a
    restrictive opening (erosion followed by dilation) so that only clusters
    spanning the required consecutive time frames (TF) and contiguous space
    points (SpaceCont) survive.  The per-effect masks are stored in
    self.CorrectedMask keyed by result type.

    User parameters (read from self.*Dict, keyed by 'Anova'/'PostHoc'):
        TF        -- number of consecutive time frames required
        Alpha     -- statistical threshold on p-values
        SpaceCont -- number of contiguous space points required
    """
    # HDF5 node paths for the four result families.
    ResultType={'Anova.GFP':'/Result/GFP/Anova','Anova.Electrodes':'/Result/All/Anova',
    'PostHoc.GFP':'/Result/GFP/PostHoc','PostHoc.Electrodes':'/Result/All/PostHoc'}
    self.CorrectedMask={}
    for r in ResultType:
        res=self.file.getNode(ResultType[r])
        if len(res)>0:
            ShapeOriginalData=self.file.getNode('/Shape').read()
            # Total operation count used to size the progress dialog:
            # (number of terms) * TF * space points, times 2 for erosion+dilation.
            self.NbCalcul=2*ShapeOriginalData.prod()
            # Progress dialog for this result family.
            # NOTE(review): a new dialog is created per family but Destroy()
            # is only called once after the loop — earlier dialogs leak
            # (PD_AUTO_HIDE masks this); confirm intent.
            self.dlg = wx.ProgressDialog(
                'Multiple Test Correction for '+r,
                'Calculation in progress : 0 %',
                self.NbCalcul,
                parent=self.parent,
                style=wx.PD_AUTO_HIDE | wx.PD_REMAINING_TIME)
            self.dlg.SetSize((200, 130))
            # tmp maps statistical-effect name -> corrected binary mask.
            tmp={}
            print(res)
            for v in res:
                self.n=0
                FolderName=r.split('.')[0]
                # NOTE(review): '\\' makes this Windows-only — os.path.join
                # would be portable.  Bare except assumes "already exists".
                try:
                    os.mkdir(self.resultFolder+'\\'+FolderName)
                except:
                    pass
                P=v['P']
                Name=v['StatEffect']
                CorrectedMask={}
                # Adapt the consecutive-time-frame and contiguous-space
                # criteria to the data's actual extent, in case the user
                # entered values that cannot apply:
                # if there is only one TF we cannot have a TF value != 1;
                # same remark for the contiguity criterion.
                if P.shape[0]==1:
                    self.TF=1
                else:
                    self.TF=self.TFDict[FolderName]
                if P.shape[1] == 1:
                    self.SpaceCont = 1
                else:
                    self.SpaceCont=self.SpaceContDict[FolderName]
                # We compute a restrictive opening: erosion followed by
                # dilation.  BinaryData marks all p-values below alpha.
                self.BinaryData=np.zeros(P.shape)
                print('init')
                print(self.AlphaDict[FolderName])
                self.BinaryData[P<self.AlphaDict[FolderName]]=1
                print(self.BinaryData.sum())
                # Erosion pass (Dilate=False).
                self.__MathematicalMorphology__(Dilate=False)
                # Feed the eroded mask into the dilation pass.
                self.BinaryData=self.Mask
                # Dilation pass (Dilate=True).
                self.__MathematicalMorphology__(Dilate=True)
                tmp[Name]=self.Mask
            # CorrectedMask maps result type -> {effect name: binary mask}.
            self.CorrectedMask[r]=tmp
    self.dlg.Destroy()
    self.file.close()
def onAddEvidence(self, event):
    """Prompt for a .dd disk image, build its TSK database, recover its
    files in the background, and index all deleted files.

    Requires an open case (global caseDetails); otherwise prints a message
    and returns.  Side effects: writes databases and extracted files under
    the case directory, refreshes the Summary tab and the case tree.
    """
    try:
        caseDetails
    except NameError:  # caseDetails not defined -> no case is open
        print("Case not opened")
    else:
        # File dialog restricted to .dd images.
        openFileDialog = wx.FileDialog(
            self, "Open", "", "", "*.dd",
            wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
        openFileDialog.ShowModal()
        global caseDir, caseDbPath
        evidencePath = openFileDialog.GetPath()  # "" if the user cancelled
        fileName = os.path.basename(evidencePath)
        for x in caseDetails:
            caseDir = x[4]     # case directory from caseDetails
            caseDbPath = x[5]  # case database path from caseDetails
        evidenceDbDir = Path(caseDir + "/Evidence_Database")
        if not evidenceDbDir.is_dir():
            # First evidence added to this case: create the database folder.
            os.mkdir(str(evidenceDbDir))
        if fileName != "":
            self._dialog = wx.ProgressDialog(
                "Adding evidence",
                "Creating database for '{s}'".format(s=fileName), 100)
            LoadingDialog(self._dialog)  # starts the loading dialog
            # Build the Sleuth Kit database for this image.
            load_db = subprocess.call([
                "tsk_loaddb", "-d",
                "{caseDir}/Evidence_Database/{fileName}.db".format(
                    caseDir=caseDir, fileName=fileName),
                evidencePath
            ])
            LoadingDialog.endLoadingDialog(self)  # ends the loading dialog
            if load_db == 0:  # tsk_loaddb reported no error
                conn = connectdb.create_connection(caseDbPath)
                with conn:
                    evidenceDbPath = str(evidenceDbDir) + "/" + fileName + ".db"
                    # NOTE(review): md5 hashing was disabled in the original
                    # (commented-out md5sum pipeline); the placeholder is kept.
                    evidenceMd5 = "None"
                    insertEvidence = (1, fileName, evidenceDbPath,
                                      datetime.datetime.now().strftime(
                                          "%Y-%m-%d %H:%M:%S"),
                                      evidenceMd5)
                    # Record the evidence in the case database (EvidenceInfo).
                    connectdb.insertEvidenceDetails(conn, insertEvidence)
                    evidenceConn = connectdb.create_connection(
                        caseDir + "/Evidence_Database/" + fileName + ".db")
                    # Partition table of the image, from the TSK database.
                    evidencePart = connectdb.select_image_partitions(evidenceConn)
                    if not Path(caseDir +
                                "/Evidence_Database/Deleted_Files.db").is_file():
                        # Deleted_Files.db does not exist yet: create it and
                        # its DeletedFiles table.
                        createDeletedFilesDb = connectdb.create_connection(
                            caseDir + "/Evidence_Database/Deleted_Files.db")
                        deletedFilesTable = "CREATE TABLE 'DeletedFiles' ('fileType' TEXT, 'status' TEXT, 'inode' TEXT, 'filePath' TEXT, 'ctime' TEXT, 'crtime' TEXT, 'atime' TEXT, 'mtime' TEXT, 'size' INTEGER, 'uid' INTEGER, 'gid' INTEGER, 'image' TEXT);"
                        connectdb.createTable(createDeletedFilesDb,
                                              deletedFilesTable)
                    else:
                        # Reuse the existing Deleted_Files.db.
                        createDeletedFilesDb = connectdb.create_connection(
                            caseDir + "/Evidence_Database/Deleted_Files.db")
                    for x in evidencePart:
                        if x[2] != "Unallocated":
                            # Recover files from every allocated partition;
                            # tsk_recover runs in the background.
                            subprocess.Popen([
                                "tsk_recover", "-e", "-o", str(x[0]),
                                evidencePath,
                                caseDir + "/Extracted/" + fileName
                            ])
                            # List all deleted files on this partition.
                            # NOTE(review): shell=True with an interpolated
                            # path is shell-injection prone; prefer the list
                            # form ["fls", "-rFdl", "-o", ...] if possible.
                            listAllDeletedFiles = "fls -rFdl -o {offset} {image}".format(
                                offset=str(x[0]), image=evidencePath)
                            process = subprocess.Popen(
                                listAllDeletedFiles,
                                shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
                            stdout, stderr = process.communicate()
                            output = stdout.decode()
                            # Normalise fls output into one tab-separated
                            # stream: the ' * ' deleted-marker column and the
                            # newlines both become tabs, yielding 11 fields
                            # per record.
                            chk = re.sub(r'[ ]\*[ ]', '\t*\t', output)
                            chk = re.sub(r'\n', '\t', chk)
                            chk = chk.split('\t')
                            # Group the flat field list into 11-field records
                            # (the trailing empty field from the final newline
                            # is dropped by the len-1 bound).
                            itemList = []
                            for i in range(0, len(chk) - 1, 11):
                                itemList.append(chk[i:i + 11])
                            with createDeletedFilesDb:
                                # 'row' renamed from 'list' (shadowed builtin).
                                for row in itemList:
                                    insertDeletedFiles = (
                                        row[0], row[1], row[2], row[3],
                                        row[4], row[5], row[6], row[7],
                                        row[8], row[9], row[10], fileName)
                                    # Insert each deleted-file record into
                                    # Deleted_Files.db.
                                    connectdb.insertDeletedFiles(
                                        createDeletedFilesDb,
                                        insertDeletedFiles)
                    wx.MessageBox(
                        "Extracting '{file}' in the background.".format(
                            file=fileName))
                    global evidenceDetails
                    evidenceDetails = connectdb.select_evidence_details(conn)
                    self.auiNotebook.DeletePage(0)
                    self.auiNotebook.RemovePage(0)
                    self.addAuiTab("Summary", evidenceDetails)
                    self.recreateTree(caseDbPath)
        openFileDialog.Destroy()