def GetConfigurationData(self):
    '''
    Get configuration data from file.

    @returns: ConfigData - Configuration data wrapped within a ConfigData
              object.
    '''
    with open(self._configFile, 'r') as f:
        content = f.read()

    broadcastData = content.split('\n')
    portData = broadcastData[0]
    messageData = broadcastData[1]

    port = int(self._getCleanedData(portData))
    message = self._getCleanedData(messageData)

    return ConfigData(port, message)
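# Hedged usage sketch (not from the original source): GetConfigurationData
# expects a two-line config file, with the broadcast port on the first line
# and the message on the second, and relies on _getCleanedData (not shown) to
# strip any surrounding labels/whitespace. The file name, the key=value
# layout, and the helper below are illustrative assumptions only.
#
# Example broadcast.cfg (hypothetical):
#     port=9000
#     message=hello-network
def _read_broadcast_config(config_path):
    """Parse a two-line '<key>=<value>' file into (port, message)."""
    with open(config_path, 'r') as f:
        lines = f.read().split('\n')
    port = int(lines[0].split('=', 1)[1].strip())
    message = lines[1].split('=', 1)[1].strip()
    return port, message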
class GraphClusterer_WI_semisupervised(object):
    CODE_DIR = '/Users/divya/work/repo/Dissertation'
    #CODE_DIR = '/Users/divya/Documents/Dissertation/Dissertation'
    DATA_DIR = '/Users/divya/Documents/input/Dissertation/data'
    LOG_DIR = '/Users/divya/Documents/logs/Dissertation'

    configdata = ConfigData(CODE_DIR, DATA_DIR, LOG_DIR)
    #graphdata = GraphData_Gene(GO_SIM_TYPE)
    graphdata = None

    def __init__(self, K):
        self.phase1data = None
        self.phase2data = None
        self.phase1_allKevaluationdata = None
        self.phase2_allKevaluationdata = None
        self.helper = MKNN_Helper()
        #self.GO_SIM_TYPE = 'BP'

    ##############################################################
    #1. Initialize MKNN
    #2. As a wrapper, call MKNN_worker for different values of K
    ##############################################################
    def MKNN_worker_wrapper(self):
        #GraphClusterer.configdata.logger.info("Initialization phase of GMKNN begins")
        self.MKNN_init()
        GraphClusterer_WI_semisupervised.configdata.logger.info(
            "Initialization phase of GMKNN ends.")
        GraphClusterer_WI_semisupervised.configdata.logger.info(
            "Working of G-MKNN begins")
        GraphClusterer_WI_semisupervised.configdata.logger.info(
            "Running G-MKNN for different values of K.")

        K_range = list(range(
            int(GraphClusterer_WI_semisupervised.configdata.K_min),
            int(GraphClusterer_WI_semisupervised.configdata.K_max) + 1))
        for K in K_range:
            GraphClusterer_WI_semisupervised.configdata.logger.info(
                "Running G-MKNN for the value of K: ")
            GraphClusterer_WI_semisupervised.configdata.logger.info(K)
            #Call MKNN_worker for the value of K
            self.MKNN_worker(K)
            #(CL_List_P2, CL_List_P1, SM, SM_orig, num_clusters_P2, num_clusters_P1, num_nodes) = MKNN_worker(K, max_num_clusters, SM, SM_orig, num_nodes, node_codes, currentdate_str, dataset_name, eval_results_dir, log)

        #Plot evaluation measures for all values of K for phase 1
        self.phase1_allKevaluationdata.plot_evaluation_measures_for_all_K()
        #Plot evaluation measures for all values of K for phase 2
        self.phase2_allKevaluationdata.plot_evaluation_measures_for_all_K()

    ############################################
    #Set up configuration
    #Set up the input matrices for the algorithm
    ############################################
    def MKNN_init(self):
        #setting up configuration
        GraphClusterer_WI_semisupervised.configdata.do_config()
        GraphClusterer_WI_semisupervised.configdata.data_dir
        logger = GraphClusterer_WI_semisupervised.configdata.logger
        logger.debug("Debugging from inside MKNN_init method")

        #Instantiate GraphData_Gene
        GraphClusterer_WI_semisupervised.graphdata = GraphData_Gene(
            GraphClusterer_WI_semisupervised.configdata.GO_TYPE,
            GraphClusterer_WI_semisupervised.configdata.GO_SIZE,
            GraphClusterer_WI_semisupervised.configdata.GO_SIM_TYPE,
            GraphClusterer_WI_semisupervised.configdata.go_obo_file,
            GraphClusterer_WI_semisupervised.configdata.gene_syn_file)

        #setting up the input matrices
        #Create SM and SM_orig
        GraphClusterer_WI_semisupervised.graphdata.create_SM_from_relfile(
            GraphClusterer_WI_semisupervised.configdata.inp_rel_file)

        #Expand SM
        #GraphClusterer_WI_semisupervised.graphdata.setup_expanded_SM(GraphClusterer_WI_semisupervised.configdata.nhops, GraphClusterer_WI_semisupervised.configdata.inp_rel_file)

        #Setup GOsim based SM_GO
        #GraphClusterer_WI_semisupervised.graphdata.setup_SM_GO(GraphClusterer_WI_semisupervised.configdata.inp_rel_file, GraphClusterer_WI_semisupervised.configdata.gene2go_file)
        GraphClusterer_WI_semisupervised.graphdata.setup_SM_GO_1(
            GraphClusterer_WI_semisupervised.configdata.inp_rel_file,
            GraphClusterer_WI_semisupervised.configdata.gene2go_file)

        #Create Edge Objects
        GraphClusterer_WI_semisupervised.graphdata.create_edge_objects()
        #self.helper.print_dict(GraphClusterer.graphdata.edge_dict)
        #self.helper.print_set(GraphClusterer.graphdata.node_dict[1].node_edges_dict[EdgeType.secondary])
        #print(GraphClusterer.graphdata.edge_dict[011].edge_id)

        #Initialize all K evaluation objects for both phases
        self.phase1_allKevaluationdata = AllKEvaluationData(self.configdata, 1)  #1 for phase=1
        self.phase2_allKevaluationdata = AllKEvaluationData(self.configdata, 2)  #2 for phase=2

        print("MKNN_Init phase complete.")

    #############################
    #Run MKNN for one value of K
    #############################
    def MKNN_worker(self, K):
        self.phase1data = Phase1Data_WI_Gene(
            GraphClusterer_WI_semisupervised.graphdata,
            GraphClusterer_WI_semisupervised.configdata, K)
        self.MKNN_Phase1()

        self.phase2data = Phase2Data_WI_Gene(
            GraphClusterer_WI_semisupervised.graphdata,
            GraphClusterer_WI_semisupervised.configdata, K,
            self.phase1data.cnodes_dict,
            self.phase1data.next_cluster_label,
            self.phase1data.num_clusters)
        self.MKNN_Phase2()

    def MKNN_Phase1(self):
        #Initialize Phase 1
        self.phase1data.initialize_phase()
        #self.helper.print_list(self.phase1data.graphdata.node_dict[10].MKNN_list)
        #print('Degree')
        #print((self.phase1data.graphdata.node_dict[10].degree))
        #print('CI_list')
        #self.helper.print_list(self.phase1data.cluster_initiator_list)
        #self.helper.convert_list_ids_to_codes(self.graphdata, self.phase1data.cluster_initiator_list)
        #print((self.phase1data.graphdata.CI_list[0]))

        #Execute Phase 1
        self.phase1data.execute_phase()

        #Visualize Phase 1 results
        self.phase1data.visualize_phase()

        #Evaluate phase
        self.phase1data.evaluate_phase()
        self.phase1_allKevaluationdata.add_evaluation_for_K(
            self.phase1data.phase1_evaluation_data)

        print("MKNN_Phase1 complete.")

    def MKNN_Phase2(self):
        #Initialize phase 2
        self.phase2data.initialize_phase()
        #self.helper.print_list(self.phase2data.c_SM)
        #self.helper.print_list(self.phase2data.c_SM_sort)

        #Execute phase 2
        self.phase2data.execute_phase()

        #Visualize phase
        self.phase2data.visualize_phase()

        #Evaluate Phase
        self.phase2data.evaluate_phase()
        self.phase2_allKevaluationdata.add_evaluation_for_K(
            self.phase2data.phase2_evaluation_data)
        #self.helper.print_list(self.phase2data.phase2_evaluation_data.gold_standard_CL_list)
        self.helper.print_list(
            self.phase2data.phase2_evaluation_data.contingency_matrix)
        print("sensitivity:")
        print(self.phase2data.phase2_evaluation_data.sensitivity)
        print("PPV")
        print(self.phase2data.phase2_evaluation_data.PPV)
        print("accuracy")
        print(self.phase2data.phase2_evaluation_data.accuracy)
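# Hedged driver sketch (not part of the original source): MKNN_worker_wrapper
# above already sweeps K from configdata.K_min to configdata.K_max, so a
# minimal entry point only needs to construct the clusterer and call the
# wrapper. The initial K value, the '__main__' guard, and the assumption that
# the hard-coded CODE_DIR/DATA_DIR/LOG_DIR paths exist are illustrative.
if __name__ == '__main__':
    clusterer = GraphClusterer_WI_semisupervised(K=5)
    clusterer.MKNN_worker_wrapper()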
class SourceController(object):
    """Source control command controller"""
    CACHE = dict()

    def __init__(self, owner):
        """Create the SourceController
        @param owner: Owner window

        """
        object.__init__(self)

        # Attributes
        self._parent = owner
        self._pid = self._parent.GetId()
        self.config = ConfigData()  # Singleton config data instance
        self.tempdir = None
        self.scThreads = {}
        # Number of seconds to allow a source control command to run
        # before timing out
        self.scTimeout = 60

    def __del__(self):
        # Clean up tempdir
        if self.tempdir:
            shutil.rmtree(self.tempdir, ignore_errors=True)
            #diffwin.CleanupTempFiles()

        # Stop any currently running source control threads
        for t in self.scThreads:
            t._Thread__stop()

    def _TimeoutCommand(self, callback, *args, **kwargs):
        """ Run command, but kill it if it takes longer than `timeout` secs
        @param callback: callable to call with results from command

        """
        result = []
        def resultWrapper(result, *args, **kwargs):
            """ Function to catch output of threaded method """
            args = list(args)
            method = args.pop(0)
            result.append(method(*args, **kwargs))

        # Insert result object to catch output
        args = list(args)
        args.insert(0, result)

        # Start thread
        t = threading.Thread(target=resultWrapper, args=args, kwargs=kwargs)
        t.start()
        self.scThreads[t] = True
        t.join(self.scTimeout)
        del self.scThreads[t]

        if t.isAlive():
            t._Thread__stop()
            return False

        if callback is not None:
            callback(result[0])
        return True

    def CompareRevisions(self, path, rev1=None, date1=None, rev2=None, date2=None):
        """
        Compare the playpen path to a specific revision, or compare two
        revisions

        Required Arguments:
        path -- absolute path of file to compare

        Keyword Arguments:
        rev1/date1 -- first file revision/date to compare against
        rev2/date2 -- second file revision/date to compare against

        """
        djob = ScCommandThread(self._parent, self.Diff, ppEVT_DIFF_COMPLETE,
                               args=(path, rev1, date1, rev2, date2))
        djob.setDaemon(True)
        djob.start()

    def Diff(self, path, rev1, date1, rev2, date2):
        """
        Do the actual diff of two files by sending the files to be
        compared to the appropriate diff program.

        @return: tuple (None, err_code)

        """
        # Only do files
        if os.path.isdir(path):
            for fname in os.listdir(path):
                self.CompareRevisions(fname, rev1=rev1, date1=date1,
                                      rev2=rev2, date2=date2)
            return

        # Check if path is under source control
        sc = self.GetSCSystem(path)
        if sc is None:
            return None

        content1 = content2 = ext1 = ext2 = None

        # Grab the first specified revision
        if rev1 or date1:
            content1 = sc['instance'].fetch([path], rev=rev1, date=date1)
            if content1 and content1[0] is None:
                return None, SC_ERROR_RETRIEVAL_FAIL
            else:
                content1 = content1[0]

            if rev1:
                ext1 = rev1
            elif date1:
                ext1 = date1

        # Grab the second specified revision
        if rev2 or date2:
            content2 = sc['instance'].fetch([path], rev=rev2, date=date2)
            if content2 and content2[0] is None:
                return None, SC_ERROR_RETRIEVAL_FAIL
            else:
                content2 = content2[0]

            if rev2:
                ext2 = rev2
            elif date2:
                ext2 = date2

        if not (rev1 or date1 or rev2 or date2):
            content1 = sc['instance'].fetch([path])
            if content1 and content1[0] is None:
                return None, SC_ERROR_RETRIEVAL_FAIL
            else:
                content1 = content1[0]
            ext1 = 'previous'

        if not self.tempdir:
            self.tempdir = tempfile.mkdtemp()

        # Write temporary files
        path1 = path2 = None
        if content1 and content2:
            path = os.path.join(self.tempdir, os.path.basename(path))
            path1 = '%s.%s' % (path, ext1)
            path2 = '%s.%s' % (path, ext2)
            tfile = open(path1, 'w')
            tfile.write(content1)
            tfile.close()
            tfile2 = open(path2, 'w')
            tfile2.write(content2)
            tfile2.close()
        elif content1:
            path1 = path
            path = os.path.join(self.tempdir, os.path.basename(path))
            path2 = '%s.%s' % (path, ext1)
            tfile = open(path2, 'w')
            tfile.write(content1)
            tfile.close()
        elif content2:
            path1 = path
            path = os.path.join(self.tempdir, os.path.basename(path))
            path2 = '%s.%s' % (path, ext2)
            tfile2 = open(path2, 'w')
            tfile2.write(content2)
            tfile2.close()

        # Run comparison program
        #if self.config.getBuiltinDiff() or not self.config.getDiffProgram():
        #    diffwin.GenerateDiff(path2, path1, html=True)
        #elif isinstance(path2, basestring) and isinstance(path2, basestring):
        if isinstance(path1, basestring) and isinstance(path2, basestring):
            subprocess.call([self.config.getDiffProgram(), path2, path1])
        else:
            return (None, SC_ERROR_RETRIEVAL_FAIL)

        return (None, SC_ERROR_NONE)

    def GetSCSystem(self, path):
        """
        Determine source control system being used on path if any
        @todo: possibly cache paths that are found to be under source control
               and the systems they belong to in order to improve performance

        """
        # XXX: Experimental caching of paths to speed up commands.
        #      Currently the improvements are quite measurable, need
        #      to monitor memory usage and end cases though.
        systems = self.config.getSCSystems()
        if path in SourceController.CACHE:
            return systems[SourceController.CACHE[path]]

        for key, value in systems.items():
            if value['instance'].isControlled(path):
                SourceController.CACHE[path] = key
                return value

    def IsSingleRepository(self, paths):
        """
        Are all paths from the same repository?

        Required Arguments:
        paths -- list of paths to test

        Returns: boolean indicating if all nodes are in the same repository
                 (True), or if they are not (False).

        """
        previous = ''
        for path in paths:
            try:
                reppath = self.GetSCSystem(path)['instance'].getRepository(path)
            except:
                continue
            if not previous:
                previous = reppath
            elif previous != reppath:
                return False
        return True

    def ScCommand(self, nodes, command, callback=None, **options):
        """
        Run a source control command

        Required Arguments:
        nodes -- selected tree nodes
                 [(treeitem, dict(path='', watcher=thread))]
        command -- name of command type to run

        """
        cjob = ScCommandThread(self._parent, self.RunScCommand,
                               ppEVT_CMD_COMPLETE,
                               args=(nodes, command, callback),
                               kwargs=options)
        cjob.setDaemon(True)
        cjob.start()

    def RunScCommand(self, nodes, command, callback, **options):
        """Does the running of the command
        @param nodes: list [(node, data), (node2, data2), ...]
        @param command: command string
        @return: (command, None)

        """
        concurrentcmds = ['status', 'history']
        NODE, DATA, SC = 0, 1, 2
        nodeinfo = []
        sc = None
        for node, data in nodes:
            # node, data, sc
            info = [node, data, None]

            # See if the node already has an operation running
            i = 0
            while data.get('sclock', None):
                time.sleep(1)
                i += 1
                if i > self.scTimeout:
                    return (None, None)

            # See if the node has a path associated
            # Technically, all nodes should (except the root node)
            if 'path' not in data:
                continue

            # Determine source control system
            sc = self.GetSCSystem(data['path'])
            if sc is None:
                if os.path.isdir(data['path']) or command == 'add':
                    sc = self.GetSCSystem(os.path.dirname(data['path']))
                    if sc is None:
                        continue
                else:
                    continue
            info[SC] = sc

            nodeinfo.append(info)

        # Check if the sc was found
        if sc is None:
            return (None, None)

        # Lock node while command is running
        if command not in concurrentcmds:
            for node, data, sc in nodeinfo:
                data['sclock'] = command

        rc = True
        try:
            # Find correct method
            method = getattr(sc['instance'], command, None)
            if method:
                # Run command (only if it isn't the status command)
                if command != 'status':
                    rc = self._TimeoutCommand(callback, method,
                                              [x[DATA]['path'] for x in nodeinfo],
                                              **options)
        finally:
            # Only update status if last command didn't time out
            if command not in ['history', 'revert', 'update'] and rc:
                for node, data, sc in nodeinfo:
                    self.StatusWithTimeout(sc, node, data)

            # Unlock
            if command not in concurrentcmds:
                for node, data, sc in nodeinfo:
                    del data['sclock']

        return (command, None)

    def StatusWithTimeout(self, sc, node, data, recursive=False):
        """Run a SourceControl status command with a timeout
        @param sc: SourceControl instance
        @param node: tree node, data
        @param data: data dict(path='')

        """
        status = {}
        try:
            rval = self._TimeoutCommand(None, sc['instance'].status,
                                        [data['path']],
                                        recursive=recursive,
                                        status=status)
        except Exception, msg:
            print "ERROR:", msg

        evt = SourceControlEvent(ppEVT_STATUS, self._pid,
                                 (node, data, status, sc))
        wx.PostEvent(self._parent, evt)
class MKNN_Helper(object):
    CODE_DIR = '/Users/divya/work/repo/Dissertation'
    #CODE_DIR = '/Users/divya/Documents/Dissertation/Dissertation'
    DATA_DIR = '/Users/divya/Documents/input/Dissertation/data'
    LOG_DIR = '/Users/divya/Documents/logs/Dissertation'

    configdata = ConfigData(CODE_DIR, DATA_DIR, LOG_DIR)

    def __init__(self):
        MKNN_Helper.configdata.do_config()

    ###################################################
    #Helper function: Calculate the union of two lists
    #Used in MKNN_init
    ###################################################
    def union(self, a, b):
        """ return the union of two lists """
        return list(set(a) | set(b))

    def print_list(self, list_1):
        for i in list_1:
            print(i)
            print(",")

    def print_set(self, set_1):
        for i in set_1:
            print(i)
            print(",")

    def print_dict(self, dict_1):
        for (key, value) in dict_1.items():
            print("%s: %s" % (key, value))

    def convert_list_ids_to_codes(self, graphdata, list_1):
        codes_list = [graphdata.node_dict[i].node_code for i in list_1]
        print(codes_list)

    def print_set_codes(self, set_1, node_dict):
        for i in set_1:
            print(node_dict[i].node_code)
            print(",")

    def print_list_codes(self, list_1, node_dict):
        for i in list_1:
            if i != -1:
                print(node_dict[i].node_code)
            else:
                print("-1")
            print(",")

    def print_clusters(self, clusters_list, cnodes_dict):
        nodeset = set()
        for cluster_id in clusters_list:
            cnode_data = cnodes_dict[cluster_id]
            nodeset = cnode_data.node_set

    def edit_cluster_label_list(self, cluster_label_list, node_dict, cnodes_dict,
                                specific_set_to_draw, algorithm_name):
        for cluster_id in specific_set_to_draw:
            set_temp = set()
            set_temp.add(cluster_id)
            cnode_data = cnodes_dict[cluster_id]
            nodeset = cnode_data.node_set
            for node_id in nodeset:
                if algorithm_name == AlgorithmName.Clusterone:
                    if len(set(node_dict[node_id].clusterone_clabel_dict.keys()).intersection(
                            specific_set_to_draw.difference(set_temp))) != 0:
                        cluster_label_list[node_id] = -3
                        print("Specific 1")
                    else:
                        cluster_label_list[node_id] = cluster_id
                        print("Specific 2")
                elif algorithm_name == AlgorithmName.GMKNN_ZhenHu:
                    # mirror the Clusterone check: a non-empty intersection marks
                    # nodes shared with the other clusters being drawn
                    if len(set(node_dict[node_id].GMKNN_clabel_dict.keys()).intersection(
                            specific_set_to_draw.difference(set_temp))) != 0:
                        cluster_label_list[node_id] = -3
                    else:
                        cluster_label_list[node_id] = cluster_id
        return cluster_label_list
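# Hedged usage sketch (not part of the original source): the helpers above are
# plain printing/set utilities, so they can be exercised directly. Note that
# constructing MKNN_Helper() triggers configdata.do_config(), which assumes
# the hard-coded CODE_DIR/DATA_DIR/LOG_DIR paths exist; the values below are
# made up.
def _demo_mknn_helper():
    helper = MKNN_Helper()
    print(helper.union([1, 2, 3], [3, 4]))   # -> [1, 2, 3, 4] (order may vary)
    helper.print_dict({'K_min': 2, 'K_max': 10})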
class MailBox(MailLog):
    """
    Email client that downloads unread messages and processes them
    using registered handlers
    """
    handlers = []
    configs = None
    workspace = None
    receiver = None  # receiver engine
    sender = None    # send engine

    def __init__(self, config_file, workspace_folder):
        self.logger = logging.getLogger(self.logger_name + '.MailBox')
        self.workspace = workspace_folder
        self.configs = ConfigData(config_file)
        self.logger.debug('workspace "{}" configs: {}'.format(
            self.workspace, self.configs.toStringFiltered()))

        self.logger.debug('creating MailReceiver')
        self.receiver = MailReceiver(self.configs.imap,
                                     self.configs.imap_port,
                                     self.configs.ssl_enabled,
                                     self.configs.username,
                                     self.configs.password)

        self.logger.debug('creating MailSender')
        self.sender = MailSender(self.configs.smtp,
                                 self.configs.smtp_port,
                                 self.configs.username,
                                 self.configs.password)

    def process_unread_messages(self):
        # workspace is used to download attachments, if any
        workspace = os.path.join(self.workspace, "")
        unread_mails = self.receiver.get_unread_mails(workspace)
        for email in unread_mails:
            self.logger.debug('processing email [{}]'.format(email))

            # call all handlers to process it
            for h in self.handlers:
                try:
                    self.logger.debug('handler {} called'.format(
                        self.handlers.index(h)))
                    h.handle(email)
                except Exception as e:
                    self.logger.error(e)

            # remove the temporary directory
            self.logger.debug('removing temporary directory "{}"'.format(
                email.workspace))
            shutil.rmtree(email.workspace)

            # mark the message as read
            self.receiver.set_as_read(email)

    def send_email(self, mailData):
        self.logger.debug('sending email...')
        self.sender.send(mailData)
        self.logger.debug('sending complete')

    def add_mailbox_handler(self, handler):
        self.handlers.append(handler)
        self.logger.debug('added handler "{}"'.format(handler))
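# Hedged usage sketch (not from the original source): assumes 'mailbox.cfg'
# is a config file with the IMAP/SMTP fields ConfigData exposes, that the
# account is reachable, and that '/tmp/mail_ws' is a writable workspace.
# PrintSubjectHandler is a made-up handler with the handle(email) interface
# the loop above expects.
class PrintSubjectHandler(object):
    """Minimal handler: report each downloaded message."""
    def handle(self, email):
        print('got message: {}'.format(getattr(email, 'subject', email)))

def _demo_mailbox():
    box = MailBox('mailbox.cfg', '/tmp/mail_ws')
    box.add_mailbox_handler(PrintSubjectHandler())
    # downloads unread mail, runs handlers, then marks each message as read
    box.process_unread_messages()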