def runPoreDetection(inputImp, data, ops, display):

	name = inputImp.getTitle()
	inputDataset = Utility.getDatasetByName(data, name)

	detectionParameters = DetectionParams()

	roi = inputImp.getRoi()
	if roi is None:
		message = name + ": " + Messages.NoRoi
		IJ.write(message)
		return

	roi = roi.clone()

	header, statslist = poreDetectionTrueColor(inputImp, inputDataset, roi, ops, data, display, detectionParameters)

	directory, overlayname, roiname = Utility.createImageNames(inputImp)
	statsname = directory + 'truecolor_stats.csv'

	IJ.save(inputImp, overlayname)
	IJ.saveAs(inputImp, "Selection", roiname)

	header.insert(0, Messages.FileName)
	statslist.insert(0, name)

	print header
	print statslist

	ExportDataFunction.exportSummaryStats(statsname, header, statslist)
Example #2
    def openFile(self):
        '''
        Open a file stream to either a rar/cbr or zip/cbz file
        '''
        inFile, _ = QtGui.QFileDialog.getOpenFileName(self, 'Open file',
                    QtCore.QDir.currentPath())

        self.currentPage = 0

        if zipfile.is_zipfile(inFile):      # Check if it's a zip file (.zip, .cbz)
            self.z = zipfile.ZipFile(inFile, "r")
        elif rarfile.is_rarfile(inFile):    # Check if it's a rar file (.rar, .cbr)
            self.z = rarfile.RarFile(inFile)
        else:
            msgBox = QtGui.QMessageBox()
            msgBox.setText("This is not a valid CBZ or CBR file!")
            msgBox.setStandardButtons(QtGui.QMessageBox.Ok)
            msgBox.setDefaultButton(QtGui.QMessageBox.Ok)
            ret = msgBox.exec_()

            # Re-prompt for a valid file, then return so we don't fall
            # through to showImage with no archive loaded.
            if ret == QtGui.QMessageBox.Ok:
                self.openFile()
            return

        self.showImage(self.currentPage)

        #Make the label clickable to go forward pages
        Utility.clickable(self.lbl).connect(self.changePage)

        self.scaleFactor = 1.0
        self.scaleImage(self.scaleFactor)
        self.updateActions()
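
showImage is not shown in this example; a minimal sketch of what loading page N from the opened archive might look like (an assumption, not the project's actual code; zipfile and rarfile expose the same namelist/read API):

    def showImage(self, page):
        # Sort entries so page order follows the archive listing (assumption).
        names = sorted(self.z.namelist())
        data = self.z.read(names[page])
        pixmap = QtGui.QPixmap()
        pixmap.loadFromData(data)
        self.lbl.setPixmap(pixmap)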
Example #3
    def draw(self, surf):
        ### UPDATE
        # Handle easing
        current_time = pygame.time.get_ticks()
        if self.easing_flag:
            self.x = Utility.easing(current_time - self.start_time, self.old_x, self.target_x - self.old_x, 400)
            self.y = Utility.easing(current_time - self.start_time, self.old_y, self.target_y - self.old_y, 400)
            if (self.target_x > self.old_x and self.x >= self.target_x) or \
               (self.target_x < self.old_x and self.x <= self.target_x) or \
               (self.target_y > self.old_y and self.y >= self.target_y) or \
               (self.target_y < self.old_y and self.y <= self.target_y):
                self.easing_flag = False
                self.x = self.target_x
                self.y = self.target_y

        ### DRAW
        image = self.sprite.copy()
        # Highlights
        for highlight in self.wm_highlights:
            highlight.draw(image)
        self.wm_highlights = [highlight for highlight in self.wm_highlights if not highlight.remove_clear]
        # Draw label
        for label in self.wm_labels:
            label.draw(image)
        # Update world_map_sprites
        for key,wm_unit in self.wm_sprites.iteritems():
            wm_unit.update()
        # World map sprites
        for key,wm_unit in self.wm_sprites.iteritems():
            wm_unit.draw(image)

        image = image.subsurface(self.x, self.y, WINWIDTH, WINHEIGHT)
        surf.blit(image, (0, 0))
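
Utility.easing is not shown; the call sites pass (elapsed, start, change, duration), the classic Penner-style easing signature, over a 400 ms window. A minimal linear sketch under that assumption (the project's actual curve may differ):

def easing(elapsed, start, change, duration):
    # Clamp progress to [0, 1] and interpolate linearly.
    t = min(float(elapsed) / duration, 1.0)
    return start + change * t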
Example #4
 def readTreeOfLife(self, file):
     Utility.isValid(file)
     toBeRead = Utility.countLines(file)
     sys.stderr.write(str(toBeRead)+" lines to read to construct tree\n")
     numOfLine = 0
     for line in open(file):
         parent = int(line.split()[0])
         kid=int(line.split()[1])
         if self.nodeExist(parent):
             if self.nodeExist(kid):
                 self.getNode(parent).addKid(kid)
                 self.getNode(kid).setParent(parent)
             else:
                 newNode = Taxon(kid)
                 self.addNode(newNode)
                 self.getNode(parent).addKid(kid)
                 self.getNode(kid).setParent(parent)
         else:
             if self.nodeExist(kid):
                 newNode = Taxon(parent)
                 self.addNode(newNode)  # add the Taxon object itself, not the raw parent id
                 self.getNode(kid).setParent(parent)
                 self.getNode(parent).addKid(kid)
             else:
                 newKid = Taxon(kid)
                 newParent = Taxon(parent)
                 newKid.setParent(parent)
                 newParent.addKid(kid)
                 self.addNode(newKid)
                 self.addNode(newParent)
         numOfLine += 1
         if numOfLine % 100000 == 0:
             sys.stderr.write(str(numOfLine)+" lines read\n")
     sys.stderr.write("Tree base constructed\n")
Example #5
def filter_out_uncovered_windows(islands_list, all_windows_list, window_size):
	"""
	This module uses islands list to filter the windows list, so that only 
	those windows on islands are recorded 
	
	Both input lists are lists of BED_GRAPH objects, each of BED GRAPH 
	object has tributes of start, end, and value. The islands MUST BE MADE 
	out of the corresponding summary graph file !!
	
	Windows that are in the gaps are also counted in.
	
	Both lists need to be sorted, which comes natually out of graphs and 
	islands.   They are with commensuate boundaries, as the islands are built out of the summary graphs. 
	
	"""
	assert Utility.is_bed_sorted(islands_list) == 1
	assert Utility.is_bed_sorted(all_windows_list) == 1
	
	isle_windows_list = find_isle_windows(islands_list, window_size)
	isle_windows_start_list = []
	for window in isle_windows_list:
		isle_windows_start_list.append(window.start)
		
	all_windows_start_list = []
	for window in all_windows_list:
		all_windows_start_list.append(window.start)
	
	# start position should be in the all_windows_start_list
	for index in xrange(len(isle_windows_start_list)):
		item = isle_windows_start_list[index]
		position = bisect.bisect_left(all_windows_start_list, item)
		if bisect.bisect_right(all_windows_start_list, item) - position == 1:
			isle_windows_list[index].value = all_windows_list[position].value
	return isle_windows_list
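
The bisect pair above is just an exact-membership test against a sorted list of window starts: bisect_right minus bisect_left equals 1 exactly when the start occurs once. A standalone illustration (values hypothetical):

import bisect

sorted_starts = [0, 200, 400, 600]
pos = bisect.bisect_left(sorted_starts, 400)
assert bisect.bisect_right(sorted_starts, 400) - pos == 1   # 400 is present once
assert bisect.bisect_right(sorted_starts, 300) - bisect.bisect_left(sorted_starts, 300) == 0   # 300 is absent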
Example #6
def find_windows_on_islands(species, summary_graph_file, islands_file, window_size, out_file, window_read_count_threshold=0):
	summary_graph_extension=".summarygraph"
	island_extension=".islands"
	chroms = species_chroms[species]
	SeparateByChrom.separateByChrom(chroms, summary_graph_file, summary_graph_extension)
	SeparateByChrom.separateByChrom(chroms, islands_file, island_extension)
	
	windows_on_island = {}
	for chrom in chroms:
		if Utility.fileExists(chrom+summary_graph_extension) and Utility.fileExists(chrom+island_extension):
			summary = BED.BED(species, chrom+summary_graph_extension, "BED_GRAPH", 0)
			islands = BED.BED(species, chrom+island_extension, "BED_GRAPH", 0)
			windows_on_island[chrom] = filter_out_uncovered_windows(islands[chrom], summary[chrom], window_size)
			
	if out_file != "":
		f = open(out_file, 'w')
		for chrom in chroms:
			if chrom in windows_on_island:
				for item in windows_on_island[chrom]:
					if item.value >= window_read_count_threshold:
						f.write(item.chrom + '\t' + str(item.start) + '\t' + str(item.end) + '\t' + str(item.value) + '\n')
		f.close()

	SeparateByChrom.cleanup(chroms, summary_graph_extension)
	SeparateByChrom.cleanup(chroms, island_extension)
	return windows_on_island
Example #7
def post_response(request_dict, keep_connection, client_ip):
    if request_dict['Content-Type'][0] != 'multipart/form-data':
        return header_maker('400', keep_connection=False)
    boundary = None
    for content_info in request_dict['Content-Type']:
        if content_info.startswith('boundary='):
            boundary = content_info[9:]
    if boundary is None:
        return header_maker('400', keep_connection=False)
    info_list = request_dict['body'].split('--' + boundary)
    info_list = info_list[1:-1]
    for index, item in enumerate(info_list):
        info_list[index] = item[2:-2]
    contents_dicts = [{} for _ in range(len(info_list))]
    for info_num, info in enumerate(info_list):
        contents_dicts[info_num]['header'] = info.split('\r\n\r\n')[0]
        contents_dicts[info_num]['data'] = info.split('\r\n\r\n')[1]
    for item_num, item in enumerate(contents_dicts):
        header = item['header'].split('\r\n')
        for line in header:
            if line.startswith('Content-Disposition: form-data; '):
                variables = line[32:].split('; ')
                variables_dict = {}
                for variable in variables:
                    variables_dict[variable.split('=')[0]] = variable.split('=')[1][1:-1]
        contents_dicts[item_num].update(variables_dict)
    file_ending = None
    file_name = None
    file_password = None
    file_data = None
    for content in contents_dicts:
        if content['name'] == 'name':
            file_name = content['data']
        elif content['name'] == 'password':
            file_password = content['data']
        elif content['name'] == 'file':
            if len(content['filename'].split('.')) == 1:
                file_ending = ''
            else:
                file_ending = content['filename'].split('.')[-1]
            file_data = content['data']
    if file_name is None or Utility.all_chars(file_name.replace('%20', ' '), ' ') or file_data is None:
        return header_maker('400', keep_connection=False)
    if file_ending != '':
        file_name = '%s.%s' % (file_name, file_ending)
    if file_password is None:
        file_password = ''
    file_saved = MemoryHandler.hash_save_file(file_name, file_password, file_data)
    if file_saved:
        message = '<i>file saved, you can get it at:</i><br><u>http://%s/getfile?name=%s' % (MemoryHandler.get_server_settings()['domain'], file_name.replace(' ', '%20'))
        if file_password != '':
            message += '&password='******' ', '%20')
        message += '</u>'
    else:
        message = 'file not saved'
    got_file, replay_file = MemoryHandler.get_server_file('uploadfile.html')
    if not got_file:
        return header_maker('500', keep_connection=False)
    new_replay_text = Utility.add_to_mark(replay_file, message)
    return header_maker('200', content_len=str(len(new_replay_text)), download_file=False, keep_connection=keep_connection, file_name='uploadfile.html') + new_replay_text
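
For reference, a minimal request_dict this parser accepts (boundary and field names are hypothetical; the layout follows standard multipart/form-data framing):

body = (
    "--XyZ123\r\n"
    'Content-Disposition: form-data; name="name"\r\n'
    "\r\n"
    "notes\r\n"
    "--XyZ123\r\n"
    'Content-Disposition: form-data; name="file"; filename="notes.txt"\r\n'
    "\r\n"
    "hello world\r\n"
    "--XyZ123--\r\n"
)
request_dict = {
    "Content-Type": ["multipart/form-data", "boundary=XyZ123"],
    "body": body,
}
# post_response(request_dict, True, "127.0.0.1") would save "notes.txt"
# containing "hello world", with an empty password.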
Example #8
    def init_html_content(self, filename):
        u.print_t("Parsing "+filename+" content...")

        # open the html file; it is Unicode (UTF-16) encoded.
        html_file = open(filename, encoding='utf-16')
        html_content = html_file.read()
        html_file.close()

        # remove \r\n and \n (re.S makes . match newlines in the later patterns)
        p = re.compile(r"\r?\n", re.S)
        html_content = p.sub("", html_content)

        # remove the head
        p = re.compile(r'.*<TITLE>(.*)</TITLE>(.*<BODY>)?', re.S | re.I)
        m = p.match(html_content)
        if m:
            self.html_title = m.group(1)
            self.html_body = p.sub("", html_content)
        else:
            self.html_title = filename
            self.html_body = html_content

        # remove the tail
        p = re.compile(r'</BODY>(.*</HTML>)?', re.S | re.I)  # if there is no </BODY> but there is an </HTML>, this will misbehave
        self.html_body = p.sub("", self.html_body)


Example #9
	def __init__(self):
		self.inited = False
		self.addState(MyGame_Enter()) # MYGAME_ENTER
		self.addState(MyGame_Start()) # MYGAME_START
		self.addState(MyGame_Exit()) # MYGAME_EXIT
		self.run()
		Util.getMyApp().setTouchPrev(self.exit)
Example #10
    def is_valid_log_list_line(self, line):
        """ Ensure line matches syntax 'yyyy-mm-dd <stakeholder>: msg' """

        if re.match("^[0-9]{4}-[0-9]{2}-[0-9]{2}\ [A-Za-z0-9-]*:", line):
            return True
        else:
            Utility.report_error(1, '%s: Field "Change log" incorecctly specified "%s"' % (self._file_path, line))
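
An illustration of what the pattern accepts (the example lines are hypothetical):

import re

pattern = r"^[0-9]{4}-[0-9]{2}-[0-9]{2} [A-Za-z0-9-]*:"
assert re.match(pattern, "2019-04-02 alice: initial draft")          # valid
assert not re.match(pattern, "04/02/2019 alice: initial draft")      # wrong date format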
Example #11
def speak_time():
	Utility.Sleep(100)
	rand = Utility.getRandom(0, 1)
	hour = Utility.getHour()
	min = Utility.getMinute()
	if rand == 0:
		SoundPlay("the time is")
		Utility.Sleep(100)
	else:
		SoundPlay("it is currently")
		Utility.Sleep(100)
	if hour < 12:
		NumberPlay(str(hour))
		speak_minute(min)
		SoundPlay("AM")
	elif hour == 12:
		NumberPlay(str(hour))
		speak_minute(min)
		SoundPlay("PM")
	else:  # 13..23: convert to the 12-hour clock
		NumberPlay(str(hour - 12))
		speak_minute(min)
		SoundPlay("PM")
Example #12
def baseLineSVMT(distances, semanticLabels, targetTrainingIndice, targetTestingIndice):

    baseKernels = []
    for i in range(len(distances)):
        distance = distances[i]
        distance = distance ** 2
        trainingDistances = util.sliceArray(distance, targetTrainingIndice)

        # Define kernel parameters
        gamma0 = 1.0 / np.mean(trainingDistances)
        kernel_params = [gamma0 * (2 ** index) for index in range(-3, 2, 1)]

        # Construct base kernels & pre-learned classifier
        baseKernel = util.constructBaseKernels(["rbf", "lap", "isd","id"], kernel_params, distance)
        baseKernels += baseKernel

    trainLabels = [semanticLabels[i] for i in targetTrainingIndice]
    testLabels = [semanticLabels[i] for i in targetTestingIndice]

    coef = 1.0 / (len(baseKernels))
    finalKernel = coef * baseKernels[0]
    for baseKernel in baseKernels[1:]:
        finalKernel += coef * baseKernel

    trainKernels = util.sliceArray(finalKernel, targetTrainingIndice)
    testKernel = finalKernel[np.ix_(targetTestingIndice, targetTrainingIndice)]

    clf = SVC(kernel="precomputed")
    clf.fit(trainKernels, trainLabels)
    ap = clf.score(testKernel, testLabels)

    print "BaseLine: "+str(ap)

    return ap
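
util.constructBaseKernels is not shown; a minimal sketch of what the "rbf" branch of such a helper might do, given the squared-distance matrix and the gamma list built above (an assumption, not the project's actual implementation):

import numpy as np

def construct_rbf_kernels(kernel_params, distance):
    # One kernel per gamma: K = exp(-gamma * D), with D holding squared distances.
    return [np.exp(-gamma * distance) for gamma in kernel_params]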
Example #13
 def handle_device_test_finish(data):
     """ test finished without reset """
     # in this scenario reset should not happen
     if int(data[1]):
         # case ignored
         Utility.console_log("Ignored: " + self.child_case_name, color="orange")
     one_device_case_finish(not int(data[0]))
Example #14
def multiple_devices_case(env, extra_data):
    """
     extra_data can be two types of value
     1. as dict:
            e.g.
                {"name":  "gpio master/slave test example",
                "child case num": 2,
                "config": "release",
                "env_tag": "UT_T2_1"}
     2. as a list of dicts:
            e.g.
               [{"name":  "gpio master/slave test example1",
                "child case num": 2,
                "config": "release",
                "env_tag": "UT_T2_1"},
               {"name":  "gpio master/slave test example2",
                "child case num": 2,
                "config": "release",
                "env_tag": "UT_T2_1"}]

    """
    failed_cases = []
    case_config = format_test_case_config(extra_data)
    DUTS = {}
    for ut_config in case_config:
        for one_case in case_config[ut_config]:
            case_run(DUTS, ut_config, env, one_case, failed_cases)

    if failed_cases:
        Utility.console_log("Failed Cases:", color="red")
        for _case_name in failed_cases:
            Utility.console_log("\t" + _case_name, color="red")
        raise AssertionError("Unit Test Failed")
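
A usage sketch built from the docstring above (env and the case dict come from the test framework configuration):

multiple_devices_case(env, {"name": "gpio master/slave test example",
                            "child case num": 2,
                            "config": "release",
                            "env_tag": "UT_T2_1"})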
Example #15
    def setFromTrajectory(self, trajectory, step = None):
        """Set the state of the universe to the one stored in
        the given |step| of the given |trajectory|. If no step number
        is given, the most recently written step is used for a restart
        trajectory, and the first step (number zero) for a normal
        trajectory.

        This operation is thread-safe; it blocks other threads that
        want to access the configuration or velocities while the data is
        being updated."""
        if step is None:
            step = trajectory.defaultStep()
        self.acquireWriteStateLock()
        try:
            self.setConfiguration(trajectory.configuration[step], 0)
            vel = self.velocities()
            try:
                vel_tr = trajectory.velocities[step]
            except AttributeError:
                if vel is not None:
                    Utility.warning("velocities were not modified because " +
                                    "the trajectory does not contain " +
                                    "velocity data.")
                return
            if vel is None:
                self._atom_properties['velocity'] = vel_tr
            else:
                vel.assign(vel_tr)
        finally:
            self.releaseWriteStateLock()
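
A hedged usage sketch (assuming MMTK's Trajectory reading API; the file name is hypothetical):

from MMTK.Trajectory import Trajectory

restart = Trajectory(None, "restart.nc")   # open an existing trajectory for reading
universe.setFromTrajectory(restart)        # no step given: uses trajectory.defaultStep()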
Example #16
	def read_received_message_with_phonenumber(self, src_number, except_msg=''):
		"""Open the conversation from src_number and check the newest message."""
		# open the messaging app
		self.__open_messaging_app()
		# open the new message with source phone number
		self.a.log.info_log(self.__tag, 'open the new message with source phone number')
		res = self.a.inputer.get(ObjectIDDict.CONVERSATION_LISTITEM, 'Name', 0, ObjectIDDict.CONVERSATION_LISTVIEW, '-1')
		if self.a.log.analysis_receive_msg_log_level(self.__tag, res) != Constant.LOG_LEVEL_SUCCESS:
			return False
		self.a.log.info_log(self.__tag, 'select the conversation with source phone number')
		index = Utility.search_key_of_value_part_match(res['PropertyDict'], src_number, '\r\n', 0, '+86')
		if index == -1:
			return False
		res = self.a.inputer.click(ObjectIDDict.CONVERSATION_LISTITEM, 0, ObjectIDDict.CONVERSATION_LISTVIEW, str(index))
		if self.a.log.analysis_receive_msg_log_level(self.__tag, res) != Constant.LOG_LEVEL_SUCCESS:
			return False
		if not self.a.validate.validate_object_for_next_step(ObjectIDDict.MESSAGE_LIST_ITEM, 2, 'message list'):
			return False
		# check the nearest message in the source phone number conversation
		res = self.a.inputer.get(ObjectIDDict.MESSAGE_LIST_ITEM, 'Name', 0, ObjectIDDict.MESSAGE_LIST)
		if self.a.log.analysis_receive_msg_log_level(self.__tag, res) != Constant.LOG_LEVEL_SUCCESS:
			return False
		msg_content = Utility.get_str_with_index(res['Property'], '\r\n', 1)
		if not self.a.validate.validate_string(msg_content, except_msg):
			return False
		# back to the main page
		self.a.log.info_log(self.__tag, 'back to the main page')
		res = self.a.inputer.hard('start', 1)
		if self.a.log.analysis_receive_msg_log_level(self.__tag, res) != Constant.LOG_LEVEL_SUCCESS:
			return False
		if not self.a.validate.validate_object_for_next_step(ObjectIDDict.MESSAGINGAPP_BUTTON, 2, 'message app'):
			return False
		return True
Example #17
 def countCitedJournals(self, result, wline):
     """Takes cited reference data from each article record in a Web of
     Science file and counts the frequency of journals over all citations."""
     citations = wline.CR.split('; ')
     i = 0
     for record in citations:
         entries = record.split(', ')
         try:
             if len(entries) < 3:
                 # assumes the last entry is the source if some data is missing
                 source = entries[len(entries)-1]
             else:
                 source = entries[2]

             Utility.addToHistogram(result, source, "capitalized")
         except IndexError:
             print("Processing Error. See data:")
             if i > 0:
                 print(citations[i-1], "||-||")
             print(record, "||-||")
             if i+1 < len(citations):
                 print(citations[i+1])

         i += 1
     return result
Example #18
File: PDB.py Project: fxia22/ASM_xf
    def close(self):
        "Closes the file. Must be called in order to prevent data loss."
        self.file.close()
        if self.warning:
            Utility.warning('Some atoms are missing in the output file ' + \
                            'because their positions are undefined.')
            self.warning = 0
Example #19
def leftover_data_test(dut, port):
    # Leftover data in POST is purged (valid and invalid URIs)
    Utility.console_log("[test] Leftover data in POST is purged (valid and invalid URIs) =>", end=' ')
    s = http.client.HTTPConnection(dut + ":" + port, timeout=15)

    s.request("POST", url='/leftover_data', body="abcdefghijklmnopqrstuvwxyz\r\nabcdefghijklmnopqrstuvwxyz")
    resp = s.getresponse()
    if not test_val("Partial data", "abcdefghij", resp.read().decode()):
        s.close()
        return False

    s.request("GET", url='/hello')
    resp = s.getresponse()
    if not test_val("Hello World Data", "Hello World!", resp.read().decode()):
        s.close()
        return False

    s.request("POST", url='/false_uri', body="abcdefghijklmnopqrstuvwxyz\r\nabcdefghijklmnopqrstuvwxyz")
    resp = s.getresponse()
    if not test_val("False URI Status", str(404), str(resp.status)):
        s.close()
        return False
    # socket would have been closed by server due to error
    s.close()

    s = http.client.HTTPConnection(dut + ":" + port, timeout=15)
    s.request("GET", url='/hello')
    resp = s.getresponse()
    if not test_val("Hello World Data", "Hello World!", resp.read().decode()):
        s.close()
        return False

    s.close()
    Utility.console_log("Success")
    return True
Example #20
 def connect(self, server):
     try:
         return self.servers_blog[server['name']]  # hold server connection; the first time this raises KeyError
     except KeyError:
         u.print_t("Connect to server %s..." % server['name'])
         self.servers_blog[server['name']] = self.server_class[server['system']](server)
         return self.servers_blog[server['name']]
Example #21
    def __init__(self, atoms, constraints):
        self.atoms = atoms
        natoms = len(self.atoms)
        nconst = reduce(operator.add, map(len, constraints))
        b = Numeric.zeros((nconst, natoms), Numeric.Float)
        c = Numeric.zeros((nconst,), Numeric.Float)
        i = 0
        for cons in constraints:
            cons.setCoefficients(self.atoms, b, c, i)
            i = i + len(cons)
        u, s, vt = LinearAlgebra.singular_value_decomposition(b)
        self.rank = 0
        for i in range(min(natoms, nconst)):
            if s[i] > 0.:
                self.rank = self.rank + 1
        self.b = b
        self.bi = LinearAlgebra.generalized_inverse(b)
        # projector onto the subspace that satisfies the constraints
        self.p = Numeric.identity(natoms) - Numeric.dot(self.bi, self.b)
        self.c = c
        self.bi_c = Numeric.dot(self.bi, c)
        # check whether b . b+ . c reproduces c; if not, the constraints are inconsistent
        c_test = Numeric.dot(self.b, self.bi_c)
        if Numeric.add.reduce((c_test-c)**2)/nconst > 1.e-12:
            Utility.warning("The charge constraints are inconsistent."
                            " They will be applied as a least-squares"
                            " condition.")
Example #22
 def handle_test_finish(data):
     """ test finished without reset """
     # in this scenario reset should not happen
     assert not exception_reset_list
     if int(data[1]):
         # case ignored
         Utility.console_log("Ignored: " + one_case["name"], color="orange")
     one_case_finish(not int(data[0]))
Example #23
 def one_case_finish(result):
     """ one test finished, let expect loop break and log result """
     test_finish.append(True)
     if result:
         Utility.console_log("Success: " + one_case["name"], color="green")
     else:
         failed_cases.append(one_case["name"])
         Utility.console_log("Failed: " + one_case["name"], color="red")
Example #24
def log_performance(item, value):
    """
    do print performance with pre-defined format to console

    :param item: performance item name
    :param value: performance value
    """
    Utility.console_log("[Performance][{}]: {}".format(item, value), "orange")
Example #25
 def __init__(self, b1, b2, ca):
     self.b1 = b1 # bond 1
     self.b2 = b2 # bond 2
     self.ca = ca # common atom
     if Utility.uniqueID(self.b2) < Utility.uniqueID(self.b1):
         self.b1, self.b2 = self.b2, self.b1
     self.a1 = b1.otherAtom(ca)
     self.a2 = b2.otherAtom(ca)
     Utility.uniqueID.registerObject(self)
Example #26
    def run(self):
        def get_child_case_name(data):
            self.child_case_name = data[0]
            time.sleep(1)
            self.dut.write(str(self.child_case_index))

        def one_device_case_finish(result):
            """ one test finished, let expect loop break and log result """
            self.finish = True
            self.result = result
            if not result:
                self.fail_name = self.child_case_name

        def device_wait_action(data):
            start_time = time.time()
            expected_signal = data[0]
            while 1:
                if time.time() > start_time + self.timeout:
                    Utility.console_log("Timeout in device for function: %s"%self.child_case_name, color="orange")
                    break
                with self.lock:
                    if expected_signal in self.sent_signal_list:
                        self.dut.write(" ")
                        self.sent_signal_list.remove(expected_signal)
                        break
                time.sleep(0.01)

        def device_send_action(data):
            with self.lock:
                self.sent_signal_list.append(data[0].encode('utf-8'))

        def handle_device_test_finish(data):
            """ test finished without reset """
            # in this scenario reset should not happen
            if int(data[1]):
                # case ignored
                Utility.console_log("Ignored: " + self.child_case_name, color="orange")
            one_device_case_finish(not int(data[0]))

        self.dut.reset()
        self.dut.write("-", flush=False)
        self.dut.expect_any(UT_APP_BOOT_UP_DONE, "0 Tests 0 Failures 0 Ignored")
        time.sleep(1)
        self.dut.write("\"{}\"".format(self.parent_case_name))
        self.dut.expect("Running " + self.parent_case_name + "...")

        while not self.finish:
            try:
                self.dut.expect_any((re.compile(r'\(' + str(self.child_case_index) + r'\)\s"(\w+)"'), get_child_case_name),
                                    (self.WAIT_SIGNAL_PATTERN, device_wait_action),  # wait signal pattern
                                    (self.SEND_SIGNAL_PATTERN, device_send_action),  # send signal pattern
                                    (self.FINISH_PATTERN, handle_device_test_finish),  # test finish pattern
                                    timeout=UT_TIMEOUT)
            except ExpectTimeout:
                Utility.console_log("Timeout in expect", color="orange")
                one_device_case_finish(False)
                break
Example #27
 def close(self):
     "Closes the file. Must be called in order to prevent data loss."
     if self.model_number is not None:
         self.file.writeLine('ENDMDL', '')
     self.file.close()
     if self.warning:
         Utility.warning('Some atoms are missing in the output file ' + \
                         'because their positions are undefined.')
         self.warning = 0
Example #28
 def adder_result(self):
     if len(self.response) != self.depth:
         Utility.console_log("Error : missing response packets")
         return False
     for i in range(len(self.response)):
         if not test_val("Thread" + str(self.id) + " response[" + str(i) + "]",
                         str(self.id * (i + 1)), str(self.response[i])):
             return False
     return True
Example #29
 def get_active_categories(self, categories):
     exist = []
     try:
         elems = pyblog.WordPress.get_categories(self)
         for elem in elems:
             exist.append(elem['categoryName'])
     except Exception:
         u.print_t('Get exist categories fail!')
     return u.get_intersection(exist, u.split_to_list(categories, ';', ''))   # intersection
Example #30
def setConfiguration(object, pdb_residues,
                     map = 'pdbmap', alt = 'pdb_alternative',
                     atom_map = None, toplevel = 1):
    defined = 0
    if hasattr(object, 'is_protein'):
        i = 0
        for chain in object:
            l = len(chain)
            defined = defined + setConfiguration(chain, pdb_residues[i:i+l],
                                                 map, alt, atom_map, 0)
            i = i + l
    elif hasattr(object, 'is_chain'):
        for i in range(len(object)):
            defined = defined + setConfiguration(object[i],
                                                 pdb_residues[i:i+1],
                                                 map, alt, atom_map, 0)
    elif hasattr(object, map):
        pdbmap = getattr(object, map)
        try: altmap = getattr(object, alt)
        except AttributeError: altmap = {}
        nres = len(pdb_residues)
        if len(pdbmap) != nres:
            raise IOError('PDB configuration does not match object ' +
                           object.fullName())
        for i in range(nres):
            defined = defined + setResidueConfiguration(object,
                                                        pdb_residues[i],
                                                        pdbmap[i], altmap,
                                                        atom_map)
    elif Collections.isCollection(object):
        nres = len(pdb_residues)
        if len(object) != nres:
            raise IOError('PDB configuration does not match object ' +
                           object.fullName())
        for i in range(nres):
            defined = defined + setConfiguration(object[i], [pdb_residues[i]],
                                                 map, alt, atom_map, 0)
    else:
        try:
            name = object.fullName()
        except AttributeError:
            try:
                name = object.name
            except AttributeError:
                name = '???'
        raise IOError('PDB configuration does not match object ' + name)
              
    if toplevel and defined < object.numberOfAtoms():
        name = '[unnamed object]'
        try:
            name = object.fullName()
        except: pass
        if name: name = ' in ' + name
        Utility.warning(str(object.numberOfAtoms()-defined) + ' atom(s)' + name +
                        ' were not assigned (new) positions.')
    return defined
Example #31
def run_unit_test_cases(env, extra_data):
    """
    extra_data can be three types of value
    1. as string:
               1. "case_name"
               2. "case_name [reset=RESET_REASON]"
    2. as dict:
               1. with key like {"name": "Intr_alloc test, shared ints"}
               2. with key like {"name": "restart from PRO CPU", "reset": "SW_CPU_RESET", "config": "psram"}
    3. as list of string or dict:
               [case1, case2, case3, {"name": "restart from PRO CPU", "reset": "SW_CPU_RESET"}, ...]

    :param extra_data: the case name or case list or case dictionary
    :return: None
    """

    case_config = format_test_case_config(extra_data)

    # We don't want to stop on a failed case (unless it is a special scenario we can't handle).
    # This list records failed cases so they can be reported before the test function exits.
    failed_cases = []

    for ut_config in case_config:
        Utility.console_log("Running unit test for config: " + ut_config, "O")
        dut = env.get_dut("unit-test-app", app_path=ut_config)
        if len(case_config[ut_config]) > 0:
            replace_app_bin(dut, "unit-test-app", case_config[ut_config][0].get('app_bin'))
        dut.start_app()

        for one_case in case_config[ut_config]:
            reset_dut(dut)

            # run test case
            dut.write("\"{}\"".format(one_case["name"]))
            dut.expect("Running " + one_case["name"] + "...")

            exception_reset_list = []

            # we want to set this flag in callbacks (inner functions)
            # use list here so we can use append to set this flag
            test_finish = list()

            # expect callbacks
            def one_case_finish(result):
                """ one test finished, let expect loop break and log result """
                test_finish.append(True)
                if result:
                    Utility.console_log("Success: " + one_case["name"], color="green")
                else:
                    failed_cases.append(one_case["name"])
                    Utility.console_log("Failed: " + one_case["name"], color="red")

            def handle_exception_reset(data):
                """
                just append data to exception list.
                exception list will be checked in ``handle_reset_finish``, once reset finished.
                """
                exception_reset_list.append(data[0])

            def handle_test_finish(data):
                """ test finished without reset """
                # in this scenario reset should not happen
                assert not exception_reset_list
                if int(data[1]):
                    # case ignored
                    Utility.console_log("Ignored: " + one_case["name"], color="orange")
                one_case_finish(not int(data[0]))

            def handle_reset_finish(data):
                """ reset happened and reboot finished """
                assert exception_reset_list  # reboot but no exception/reset logged. should never happen
                result = False
                if len(one_case["reset"]) == len(exception_reset_list):
                    for i, exception in enumerate(exception_reset_list):
                        if one_case["reset"][i] not in exception:
                            break
                    else:
                        result = True
                if not result:
                    Utility.console_log("""Reset Check Failed: \r\n\tExpected: {}\r\n\tGet: {}"""
                                        .format(one_case["reset"], exception_reset_list),
                                        color="orange")
                one_case_finish(result)

            while not test_finish:
                try:
                    dut.expect_any((RESET_PATTERN, handle_exception_reset),
                                   (EXCEPTION_PATTERN, handle_exception_reset),
                                   (ABORT_PATTERN, handle_exception_reset),
                                   (FINISH_PATTERN, handle_test_finish),
                                   (UT_APP_BOOT_UP_DONE, handle_reset_finish),
                                   timeout=one_case["timeout"])
                except ExpectTimeout:
                    Utility.console_log("Timeout in expect", color="orange")
                    one_case_finish(False)
                    break

    # raise exception if any case fails
    if failed_cases:
        Utility.console_log("Failed Cases:", color="red")
        for _case_name in failed_cases:
            Utility.console_log("\t" + _case_name, color="red")
        raise AssertionError("Unit Test Failed")
Example #32
    def __init__(self, *items, **properties):
        if items == (None, ):
            return
        self.name = ''
        if len(items) == 1 and type(items[0]) == type(''):
            try:
                filename = Database.databasePath(items[0], 'Proteins')
                found = 1
            except IOError:
                found = 0
            if found:
                blueprint = Database.BlueprintProtein(items[0])
                items = blueprint.chains
                for attr, value in vars(blueprint).items():
                    if attr not in ['type', 'chains']:
                        setattr(self, attr, value)
            else:
                import PDB
                conf = PDB.PDBConfiguration(items[0])
                model = properties.get('model', 'all')
                items = conf.createPeptideChains(model)
        molecules = []
        for i in items:
            if ChemicalObjects.isChemicalObject(i):
                molecules.append(i)
            else:
                molecules = molecules + list(i)
        for m, i in zip(molecules, range(len(molecules))):
            m._numbers = [i]
            if not m.name:
                m.name = 'chain' + str(i)
        ss = self._findSSBridges(molecules)
        new_mol = {}
        for m in molecules:
            new_mol[m] = ([m], [])
        for bond in ss:
            m1 = new_mol[bond[0].topLevelChemicalObject()]
            m2 = new_mol[bond[1].topLevelChemicalObject()]
            if m1 == m2:
                m1[1].append(bond)
            else:
                combined = (m1[0] + m2[0], m1[1] + m2[1] + [bond])
                for m in combined[0]:
                    new_mol[m] = combined
        self.molecules = []
        while new_mol:
            m = new_mol.values()[0]
            for i in m[0]:
                del new_mol[i]
            bonds = m[1]
            if len(m[0]) == 1:
                m = m[0][0]
            else:
                numbers = reduce(operator.add, map(lambda i: i._numbers, m[0]))
                m = ConnectedChains(m[0])
                m._numbers = numbers
                for c in m:
                    c.parent = self
            m._addSSBridges(bonds)
            m.parent = self
            self.molecules.append(m)

        self.atoms = []
        self.chains = []
        for m in self.molecules:
            self.atoms.extend(m.atoms)
            if hasattr(m, 'is_connected_chains'):
                for c, name, i in zip(range(len(m)), m.chain_names,
                                      m._numbers):
                    self.chains.append((m, c, name, i))
            else:
                try:
                    name = m.name
                except AttributeError:
                    name = ''
                self.chains.append((m, None, name, m._numbers[0]))
        self.chains.sort(lambda c1, c2: cmp(c1[3], c2[3]))
        self.chains = map(lambda c: c[:3], self.chains)

        self.parent = None
        self.type = None
        self.configurations = {}
        try:
            self.name = properties['name']
            del properties['name']
        except KeyError:
            pass
        if properties.has_key('position'):
            self.translateTo(properties['position'])
            del properties['position']
        self.addProperties(properties)

        undefined = 0
        for a in self.atoms:
            if a.position() is None:
                undefined = undefined + 1
        if undefined > 0 and undefined != len(self.atoms):
            Utility.warning('Some atoms in a protein ' +
                            'have undefined positions.')
Example #33
def run_one_normal_case(dut, one_case, junit_test_case):

    reset_dut(dut)

    dut.start_capture_raw_data()
    # run test case
    dut.write("\"{}\"".format(one_case["name"]))
    dut.expect("Running " + one_case["name"] + "...")

    exception_reset_list = []

    # we want to set this flag in callbacks (inner functions)
    # use list here so we can use append to set this flag
    test_finish = list()

    # expect callbacks
    def one_case_finish(result):
        """ one test finished, let expect loop break and log result """
        test_finish.append(True)
        output = dut.stop_capture_raw_data()
        if result:
            Utility.console_log("Success: " + one_case["name"], color="green")
        else:
            Utility.console_log("Failed: " + one_case["name"], color="red")
            junit_test_case.add_failure_info(output)
            raise TestCaseFailed()

    def handle_exception_reset(data):
        """
        just append data to exception list.
        exception list will be checked in ``handle_reset_finish``, once reset finished.
        """
        exception_reset_list.append(data[0])

    def handle_test_finish(data):
        """ test finished without reset """
        # in this scenario reset should not happen
        assert not exception_reset_list
        if int(data[1]):
            # case ignored
            Utility.console_log("Ignored: " + one_case["name"], color="orange")
            junit_test_case.add_skipped_info("ignored")
        one_case_finish(not int(data[0]))

    def handle_reset_finish(data):
        """ reset happened and reboot finished """
        assert exception_reset_list  # reboot but no exception/reset logged. should never happen
        result = False
        if len(one_case["reset"]) == len(exception_reset_list):
            for i, exception in enumerate(exception_reset_list):
                if one_case["reset"][i] not in exception:
                    break
            else:
                result = True
        if not result:
            err_msg = "Reset Check Failed: \r\n\tExpected: {}\r\n\tGet: {}".format(
                one_case["reset"], exception_reset_list)
            Utility.console_log(err_msg, color="orange")
            junit_test_case.add_error_info(err_msg)
        one_case_finish(result)

    while not test_finish:
        try:
            dut.expect_any((RESET_PATTERN, handle_exception_reset),
                           (EXCEPTION_PATTERN, handle_exception_reset),
                           (ABORT_PATTERN, handle_exception_reset),
                           (FINISH_PATTERN, handle_test_finish),
                           (UT_APP_BOOT_UP_DONE, handle_reset_finish),
                           timeout=one_case["timeout"])
        except ExpectTimeout:
            Utility.console_log("Timeout in expect", color="orange")
            junit_test_case.add_failure_info("timeout")
            one_case_finish(False)
            break
Example #34
def run_one_multiple_stage_case(dut, one_case, junit_test_case):
    reset_dut(dut)

    dut.start_capture_raw_data()

    exception_reset_list = []

    for test_stage in range(one_case["child case num"]):
        # select multi stage test case name
        dut.write("\"{}\"".format(one_case["name"]))
        dut.expect("Running " + one_case["name"] + "...")
        # select test function for current stage
        dut.write(str(test_stage + 1))

        # we want to set this flag in callbacks (inner functions)
        # use list here so we can use append to set this flag
        stage_finish = list()

        def last_stage():
            return test_stage == one_case["child case num"] - 1

        def check_reset():
            if one_case["reset"]:
                assert exception_reset_list  # reboot but no exception/reset logged. should never happen
                result = False
                if len(one_case["reset"]) == len(exception_reset_list):
                    for i, exception in enumerate(exception_reset_list):
                        if one_case["reset"][i] not in exception:
                            break
                    else:
                        result = True
                if not result:
                    err_msg = "Reset Check Failed: \r\n\tExpected: {}\r\n\tGet: {}".format(
                        one_case["reset"], exception_reset_list)
                    Utility.console_log(err_msg, color="orange")
                    junit_test_case.add_failure_info(err_msg)
            else:
                # we allow omitting reset in multi-stage cases
                result = True
            return result

        # expect callbacks
        def one_case_finish(result):
            """ one test finished, let expect loop break and log result """
            # handle test finish
            result = result and check_reset()
            output = dut.stop_capture_raw_data()
            if result:
                Utility.console_log("Success: " + one_case["name"],
                                    color="green")
            else:
                Utility.console_log("Failed: " + one_case["name"], color="red")
                junit_test_case.add_failure_info(output)
                raise TestCaseFailed()
            stage_finish.append("break")

        def handle_exception_reset(data):
            """
            just append data to exception list.
            exception list will be checked in ``handle_reset_finish``, once reset finished.
            """
            exception_reset_list.append(data[0])

        def handle_test_finish(data):
            """ test finished without reset """
            # in this scenario reset should not happen
            if int(data[1]):
                # case ignored
                Utility.console_log("Ignored: " + one_case["name"],
                                    color="orange")
                junit_test_case.add_skipped_info("ignored")
            # only passed in last stage will be regarded as real pass
            if last_stage():
                one_case_finish(not int(data[0]))
            else:
                Utility.console_log("test finished before enter last stage",
                                    color="orange")
                one_case_finish(False)

        def handle_next_stage(data):
            """ reboot finished. we goto next stage """
            if last_stage():
                # already last stage, should never goto next stage
                Utility.console_log("didn't finish at last stage",
                                    color="orange")
                one_case_finish(False)
            else:
                stage_finish.append("continue")

        while not stage_finish:
            try:
                dut.expect_any((RESET_PATTERN, handle_exception_reset),
                               (EXCEPTION_PATTERN, handle_exception_reset),
                               (ABORT_PATTERN, handle_exception_reset),
                               (FINISH_PATTERN, handle_test_finish),
                               (UT_APP_BOOT_UP_DONE, handle_next_stage),
                               timeout=one_case["timeout"])
            except ExpectTimeout:
                Utility.console_log("Timeout in expect", color="orange")
                one_case_finish(False)
                break
        if stage_finish[0] == "break":
            # test breaks on current stage
            break
Example #35
 def get_positions(self, unit_position, cursor_position, tileMap, item):
     if self.mode == 'Normal':
         return cursor_position, []
     elif self.mode == 'Cleave_Old':
         other_position = []
         if cursor_position[1] < unit_position[1]:
             other_position.append(
                 (cursor_position[0] - 1, cursor_position[1]))
             other_position.append(
                 (cursor_position[0] + 1, cursor_position[1]))
         if cursor_position[0] < unit_position[0]:
             other_position.append(
                 (cursor_position[0], cursor_position[1] - 1))
             other_position.append(
                 (cursor_position[0], cursor_position[1] + 1))
         if cursor_position[0] > unit_position[0]:
             other_position.append(
                 (cursor_position[0], cursor_position[1] - 1))
             other_position.append(
                 (cursor_position[0], cursor_position[1] + 1))
         if cursor_position[1] > unit_position[1]:
             other_position.append(
                 (cursor_position[0] - 1, cursor_position[1]))
             other_position.append(
                 (cursor_position[0] + 1, cursor_position[1]))
         splash_positions = [
             position for position in other_position
             if tileMap.check_bounds(position)
         ]
         return cursor_position, splash_positions
     elif self.mode == 'Cleave':
         p = unit_position
         other_position = [(p[0] - 1, p[1] - 1), (p[0], p[1] - 1),
                           (p[0] + 1, p[1] - 1), (p[0] - 1, p[1]),
                           (p[0] + 1, p[1]), (p[0] - 1, p[1] + 1),
                           (p[0], p[1] + 1), (p[0] + 1, p[1] + 1)]
         splash_positions = {
             position
             for position in other_position
             if tileMap.check_bounds(position)
         }
         return cursor_position, list(splash_positions - {cursor_position})
     elif self.mode == 'Blast':
         splash_positions = Utility.find_manhattan_spheres(
             range(self.number + 1), cursor_position)
         splash_positions = {
             position
             for position in splash_positions
             if tileMap.check_bounds(position)
         }
         if item.weapon:
             return cursor_position, list(splash_positions -
                                          {cursor_position})
         else:
             return None, list(splash_positions)
     elif self.mode == 'Line':
         splash_positions = Utility.raytrace(unit_position, cursor_position)
         splash_positions = [
             position for position in splash_positions
             if position != unit_position
         ]
         return None, splash_positions
     else:
         print('Error! ' + self.mode + ' AOE mode is not supported yet!')
         return cursor_position, []
Example #36
def runConceptAttribute(distances, labels, actualSemanticLabels, auxiliaryTrainingIndices, targetTrainingIndices, targetTestingIndices, classificationMethod):

    all_trainingIndices = targetTrainingIndices

    baseKernels = []
    for i in range(len(distances)):
        distance = distances[i]
        distance = distance ** 2
        trainingDistances = util.sliceArray(distance, all_trainingIndices)

        # Define kernel parameters
        gamma0 = 1.0 / np.mean(trainingDistances)
        kernel_params = [gamma0 * (2 ** index) for index in range(-3, 2, 1)]

        # Construct base kernels & pre-learned classifier
        baseKernel = util.constructBaseKernels(["rbf", "lap", "isd","id"], kernel_params, distance)
        baseKernels += baseKernel

    # Train classifiers based on Youtube videos & Assign concept scores to target domain
    targetTrainingConceptScores = np.zeros((len(targetTrainingIndices), labels.shape[1]))
    targetTestingConceptScores = np.zeros((len(targetTestingIndices), labels.shape[1]))

    for classNum in range(labels.shape[1]):
        thisClassLabels = labels[::, classNum]
        auTrainingLabels = [thisClassLabels[index] for index in auxiliaryTrainingIndices]

        targetTrainDvs = []
        targetTestDvs = []
        for m in range(len(baseKernels)):
            baseKernel = baseKernels[m]
            auKtrain = util.sliceArray(baseKernel, auxiliaryTrainingIndices)
            Ktest = baseKernel[::, auxiliaryTrainingIndices]

            clf = SVC(kernel="precomputed")
            clf.fit(auKtrain, auTrainingLabels)
            dv = clf.decision_function(Ktest)

            targetTrainDv = [dv[index][0] for index in targetTrainingIndices]
            targetTrainDvs.append(targetTrainDv)

            targetTestDv = [dv[index][0] for index in targetTestingIndices]
            targetTestDvs.append(targetTestDv)

        targetTrainDvs = np.array(targetTrainDvs)
        targetTestDvs = np.array(targetTestDvs)

        # Fuse decision values from different kernels: sigmoid, then average
        tempScores = 1.0 / (1 + math.e ** (-targetTrainDvs))
        targetTrainDvs = np.mean(tempScores, axis=0)

        tempScores = 1.0 / (1 + math.e ** (-targetTestDvs))
        targetTestDvs = np.mean(tempScores, axis=0)

        for trainIndex in range(len(targetTrainingIndices)):
            targetTrainingConceptScores[trainIndex][classNum] = targetTrainDvs[trainIndex]
        for testIndex in range(len(targetTestingIndices)):
            targetTestingConceptScores[testIndex][classNum] = targetTestDvs[testIndex]

    # Use new representations to classify
    actualTrainLabels = [actualSemanticLabels[i] for i in targetTrainingIndices]
    actualTestLabels = [actualSemanticLabels[i] for i in targetTestingIndices]

    # Classify using different approaches
    if classificationMethod == "SVM":

        SVMmodel = SVC(kernel = "rbf")
        SVMmodel.fit(targetTrainingConceptScores, actualTrainLabels)
        ap = SVMmodel.score(targetTestingConceptScores, actualTestLabels)

        print "SVM_Rbf: " +str(ap)

    if classificationMethod == "NaiveBayes":
        gnb = GaussianNB()
        gnb.fit(targetTrainingConceptScores, actualTrainLabels)
        ap = gnb.score(targetTestingConceptScores, actualTestLabels)
        print "Naive Bayes: " +str(ap)


    return ap
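
The fusion step maps each kernel's decision values through a sigmoid and averages across kernels; a standalone illustration (values hypothetical):

import numpy as np

dvs = np.array([[-1.2, 0.3],         # decision values from kernel 1
                [0.8, 2.0]])         # decision values from kernel 2
scores = 1.0 / (1.0 + np.exp(-dvs))  # sigmoid per decision value
fused = scores.mean(axis=0)          # average across kernels, one score per sample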
Example #37
 def __transformToMetric(self):
     self.metricPressure = util.InTohPa(self.pressure)
Example #38
    trainingIndice, testingIndice = util.generateRandomIndices(semanticLabels, 3)

    base = baseLineSVMT(distance, semanticLabels, trainingIndice, testingIndice)

    if youtubeOrNot:
        auxiliaryTraining = [i for i in range(195, 1101, 1)]
    else:
        auxiliaryTraining = trainingIndice

    cs = runConceptAttribute(distance, binaryLabel, semanticLabels ,auxiliaryTraining, trainingIndice, testingIndice, classificationMethod)
    return base, cs


if __name__ == "__main__":

    distanceOne = util.loadObject("LevelZero/all_DistanceMatrix_Level0.pkl")
    labels = loadmat("labels.mat")['labels']

    semanticLabels = util.loadObject("LevelZero/all_labels_Level0.pkl")

    distances = []
    distances.append(distanceOne)

    binaryLabel = util.generateBinaryLabels(semanticLabels)

    semanticLabels = semanticLabels[:195]


    # Write result into an excel file
    import xlwt
    wb = xlwt.Workbook()
Example #39
    print str(kernels) + ": " + str(meanAP) + u"\u00B1" + str(sd)


if __name__ == "__main__":

    labels = loadmat("labels.mat")['labels']

    kernelChoices = []
    kernelChoices.append(["rbf"])
    kernelChoices.append(["lap"])
    kernelChoices.append(["isd"])
    kernelChoices.append(["id"])
    kernelChoices.append(["rbf", "lap", "isd", "id"])

    spherical128 = util.loadObject(
        "/Users/GongLi/PycharmProjects/DomainAdaption/Distances/GMM/Spherical Covariance/All 128/All_GMM_distances.pkl"
    )
    spherical_64 = util.loadObject(
        "/Users/GongLi/PycharmProjects/DomainAdaption/Distances/GMM/Spherical Covariance/PCA64_Spherical_GMM_n_iteration50_KodakDistances.pkl"
    )
    full_128 = util.loadObject(
        "/Users/GongLi/PycharmProjects/DomainAdaption/Distances/GMM/Full Covariance/128_Full_GMM_n_iteration50_KodakDistances.pkl"
    )
    full_64 = util.loadObject(
        "/Users/GongLi/PycharmProjects/DomainAdaption/Distances/GMM/Full Covariance/PCA64_FUll_GMM_n_iteration50_KodakDistances.pkl"
    )

    candidates_dis = []
    candidates_dis.append(spherical128)
    candidates_dis.append(spherical_64)
    candidates_dis.append(full_128)
Example #40
def timeseriesDetection(dataFile, inputJSON,
                        p_m1=0.70, p_m2=0.25, p_m3=0.05,
                        p_num_bins=20, p_time_window=100, p_time_shift=20,
                        p_epsilon=0.025, p_gamma=0.3, p_alpha=0.59,
                        numDimensions=1):
    total_number_of_bins = p_num_bins
    time_window_in_seconds = p_time_window
    time_window_shift = p_time_shift
    # we take the average of 3 readings at every iostat command data record.
    lst_time_series_a = []
    lst_time_series_b = []

    # Not needed anymore, keeping for legacy purposes
    lst_avg_cpu_nice = []
    lst_avg_cpu_system = []
    lst_avg_cpu_iowait = []
    lst_avg_cpu_steal = []

    # Synthetic Random Anomaly Data
    lst_syn_cpu_data = []

    # this will collect the time iostat was run at each time
    lst_time = []
    lst_labels = []                 # anomaly labels (0, 1) for each data-point, by respective time-series index.
    lst_window_labels = []          # anomaly labels for each window, calculated from original individual data-labels.

    # this list of Arrays will keep the softmax'ed x
    lst_softmaxed = []

    # list of EigenValues/Vectors for each window, calculated from HenkelMatrix for that time window's bins array
    lst_eigenvalues = []
    lst_eigenvectors = []

    if not os.path.isfile(dataFile):
        print("[!] Couldn't find data file %s" % dataFile)
        sys.exit()

    if inputJSON:
        with open(dataFile, 'r') as f:
            data_dict = json.load(f)
        print("[+] Total number of items in tree_root: %d" % (len(data_dict["tree_root"])))

        for i in data_dict["tree_root"]:
            cur_t = i["iostat"]["date_time"]
            index = cur_t.rfind(":")
            cur_t = str(cur_t[:index] + "." + cur_t[index + 1:]).replace("T", " ")
            cur_t = dt.datetime.strptime(str(cur_t[:-3]), '%Y-%m-%d %H:%M:%S.%f')
            lst_time.append(cur_t)

            # "iowait": "0.08", "system": "0.26", "idle": "97.74", "user": "******", "cpu_nice": "0.00", "steal": "0.00"
            avg_cpu_iowait_sum = 0
            avg_cpu_system_sum = 0
            avg_cpu_idle_sum = 0
            avg_cpu_user_sum = 0
            avg_cpu_cpu_nice_sum = 0
            avg_cpu_steal_sum = 0

            for j in i["iostat"]["list_stats"]["list_stats"]:
                avg_cpu_iowait_sum += float(j["avg-cpu"]["iowait"])
                avg_cpu_system_sum += float(j["avg-cpu"]["system"])
                avg_cpu_idle_sum += float(j["avg-cpu"]["idle"])
                avg_cpu_user_sum += float(j["avg-cpu"]["user"])
                avg_cpu_cpu_nice_sum += float(j["avg-cpu"]["cpu_nice"])
                avg_cpu_steal_sum += float(j["avg-cpu"]["steal"])

            lst_time_series_a.append(avg_cpu_user_sum / 3.0)
            lst_avg_cpu_nice.append(avg_cpu_cpu_nice_sum / 3.0)
            lst_avg_cpu_system.append(avg_cpu_system_sum / 3.0)
            lst_avg_cpu_iowait.append(avg_cpu_iowait_sum / 3.0)
            lst_avg_cpu_steal.append(avg_cpu_steal_sum / 3.0)
            lst_time_series_b.append(avg_cpu_idle_sum / 3.0)
    # input file is CSV
    else:

        #lst_time, lst_labels, lst_time_series_a, lst_time_series_b = util.extract_time_series_from_csv(dataFile)
        # TODO: Testing Time-Series-A, Time-Series-B switch (Change This back, 03/11/2021)
        lst_time, lst_labels, lst_time_series_b, lst_time_series_a = util.extract_time_series_from_csv(dataFile)

    # Generate Random, Anomaly Data
    # (lst_random_cpu_data, lst_indices_artificial_anomalies) = util.generate_syn_cpu_data(len(lst_time_series_a), 0.10)
    # lst_time_series_a = lst_random_cpu_data.tolist()
    # Comment if you don't want to use the randomly generated Time-Series data
    # s1, s2, anomaly_index1, anomaly_index2, anomaly_index3 = util.generate_synthetic_data(len(lst_time_series_a))
    # lst_time_series_a = s1
    # lst_time_series_b = s2

    print("[+] Size of first time-series data (list): %d " % (len(lst_time_series_a)))

    # TODO: GENERATE 2d Matrix from 2 time-series
    if numDimensions == 2:
        joint_matrix = util.gen_matrix_from_2_time_series(lst_time_series_a, lst_time_series_b, total_number_of_bins)

    # calculation for one parameter.
    # TODO We should make this part a function, so that we could call it for different CPU parameters
    total_experiment_in_seconds = (lst_time[-1] - lst_time[0]).total_seconds()
    print("[+] Total Duration for experiment: %f seconds" % total_experiment_in_seconds)

    # MIN-MAX Values for the first Time-Series
    max_time_series_a = max(lst_time_series_a)
    min_time_series_a = min(lst_time_series_a)

    # MIN-MAX Values for the second Time-Series
    max_time_series_b = max(lst_time_series_b)
    min_time_series_b = min(lst_time_series_b)

    # Distance between Max-Min in both time-series
    delta_time_series_a = max_time_series_a - min_time_series_a  # Distance between maximum value and min value
    delta_time_series_b = max_time_series_b - min_time_series_b    # Distance for second Time-Series

    # bin_width form the time-series
    bin_width = delta_time_series_a / total_number_of_bins  # size of each bin, depending on the number of bins
    bin_width2 = delta_time_series_b / total_number_of_bins  # size of each bin in the second time-series

    # BIN_EDGES for each time-series
    bin_edges = np.arange(min_time_series_a, max_time_series_a, bin_width).tolist()  # calculate each bin's boundaries
    bin_edges2 = np.arange(min_time_series_b, max_time_series_b, bin_width2).tolist()  # calculate each bin's boundaries

    bin_edges.append(max_time_series_a)
    bin_edges2.append(max_time_series_b)

    # TODO: We need to slide the time window so that it overlaps with the previous window
    greenwich = lst_time[0]  # First time point from the experiment's log file.
    i = 0
    number_of_time_shifts = 0  # at each iteration we will shift the current window "time_window_shift"
    starting_index = 0  # starting index for current time window

    # list of 2 value tuples, will keep (start_index, ending_index) for each window
    lst_window_start_end_indices = []

    while i < len(lst_time):
        total_shift = number_of_time_shifts * time_window_shift
        number_of_time_shifts += 1

        curtime = greenwich + dt.timedelta(seconds=total_shift)

        # find the current window's starting index,
        # so that lst_time[starting_index] is less than or equal to curtime
        # lst_time[ starting_index ] <= curtime

        while starting_index < len(lst_time) and lst_time[starting_index] <= curtime:
            starting_index += 1

        starting_index -= 1
        i = starting_index  # reset "i" to start from the start_index for the current window
        curtime = lst_time[starting_index]  # reset curtime to starting time for the current window

        endtime = curtime + dt.timedelta(seconds=int(time_window_in_seconds))  # upper bound for time record in window

        while (curtime <= endtime) and (i < len(lst_time)):  # loop until we found the index for final time record
            i += 1
            if i >= len(lst_time):
                break
            curtime = lst_time[i]

        ending_index = i - 1  # index for biggest time value in the current time window

        # add (starting_index, ending_index) to list of window indexes
        lst_window_start_end_indices.append((starting_index, ending_index))

        # Python slices exclude the stop index, so +1 is needed to include
        # the record at ending_index (the last one inside the current window)
        x = lst_time_series_a[starting_index:ending_index + 1]      # data for the current time window in 1st TS
        if numDimensions > 1:
            y = lst_time_series_b[starting_index:ending_index + 1]  # data for the current time window in 2nd TS

        n = util.calc_bin_disribution(x, bin_edges)

        if numDimensions > 1:      # Only if we are using 2 Time-Series
            n2, bins2, patches2 = plt.hist(y,
                                           bins=bin_edges2,
                                           range=[min_time_series_b, max_time_series_b],  # MIN/MAX for 2nd time-series
                                           #normed=False,
                                           rwidth=0.85,
                                           histtype='step'
                                           )
            plt.close()

            n2_compare = util.calc_bin_disribution(y, bin_edges2)
            # .all() reduces each array to a single boolean, so comparing the
            # two .all() results does not test equality; np.array_equal does.
            if np.array_equal(n2, n2_compare):
                print(f'[+] Our bin distribution is working. (len(n2)={len(n2)}), (len(n2_compare)={len(n2_compare)})')
            else:
                print('[!] Our bin distribution is NOT working')

        if numDimensions > 1:
            jpm, n_npm = util.gen_matrix_from_N_time_series(n, n2, n2, n2)
            # jpm, n_npm = util.gen_matrix_from_3_time_series(n, n, n2)
            # jpm, n_npm = util.gen_matrix_from_2_time_series(n, n2)
            jpm_raveled = jpm.ravel()
        else:       # for one-time-series only
            jpm_raveled = n

        jpm_raveled = jpm_raveled.astype(float)

        # SOFTMAX'ing the distribution of data-points into BINS at the current window.
        x1 = np.asarray(jpm_raveled)
        x2 = np.reshape(x1, (1, len(x1)))
        x3 = -x2                                # TODO: Ask Korkut abi, why multiply by -1 ?
        x4 = softmax(x3)
        x5 = np.reshape(x4, len(jpm_raveled))
        x6 = x5.tolist()

        lst_softmaxed.append(x6)  # Probability distribution of cpu usage

    # Now we went through whole array of values, calculated soft-maxes, it's time to calculate anomaly_scores
    print("[+] Size of lst_softmaxed: %d" % (len(lst_softmaxed)))

    # These are the weights for KL calculations
    m1 = p_m1       # 0.70
    m2 = p_m2       # 0.25
    m3 = p_m3       # 0.05

    epsilon = p_epsilon     # 0.025         # Threshold value for anomaly: f(w) - ψ > ε         (Equation-8)
    gamma = p_gamma         # 0.3           # In paper gamma is used in Eq.7 to calculate MU_w
    alpha = p_alpha         # 0.59          # In paper alpha is used in Eq.8 to calculate ψ = (µ{w−1} + α*σ{w−1})

    # List of Dr. Bruno's anomaly scores. List consisting of f(w) for each window, starting from window#3.
    lst_anomaly_scores_T = []

    # µw => Moving Average of f(w).
    lst_mvavg = []             # µw, (i.e. MU_w): moving average(µ) for current window (w). Equation-7 in SymKL Paper.

    # σw => Standard Deviation, that are recursively updated below
    lst_std = []               # σw, (i.e. SIGMA_w): Std Deviation of current window (w). Eq-7 in paper.

    # anomaly threshold -       # ψ =  µ_{w−1} + (α * σ{w−1} )      # Equation-8 in sym-kl paper.
    lst_anomaly_runningavg = []

    # difference between f(w) and moving averages  # ∆
    lst_delta = []       # Equation-8:  DELTA = f(w) - ( MU_{w-1} + ALPHA*SIGMA_{w−1} )

    # this will count till 3 before calculating new moving averages
    reset_wait_counter = 0

    # anomaly detected
    b_anomaly_detected = False

    # right after an anomaly, we need to start counting,
    # keep another boolean to detect the start of counting time
    b_start_timer = False

    ######################################################################
    # calculate KL distance starting from index 3 (indexing starts from 0)
    # Will compare current item, (i), with  (i-1), (i-2), (i-3)
    # m1 * KL( lst_softmaxed[i], lst_sofmaxed[i-1] ) +
    # m2 * KL( lst_softmaxed[i], lst_softmaxed[i-2] ) +
    # m3 * KL ( lst_softmaxed[i], lst_softmaxed[i-3])
    ######################################################################
    for i in range(0, len(lst_softmaxed)):
        cur_window_f_w = 0                  # f(w)
        cur_window_moving_avg = 0           # µ(w)
        cur_window_std_dev = 0              # σ(w)
        cur_window_psi = 0                  # ψ(w)
        tl1=0
        tl2=0

        if i == 0:
            j4 = lst_softmaxed[i]       # j4 equals the current distribution, so both KL terms below are 0

        elif i == 1:
            j1 = [z * m1 for z in lst_softmaxed[i - 1]]
            j4 = [sum(index1) for index1 in zip(j1)]        # j1 == j4 (is TRUE)

        elif i == 2:
            j1 = [z * m1 for z in lst_softmaxed[i - 1]]
            j2 = [z * m2 for z in lst_softmaxed[i - 2]]
            j4 = [sum(index1) for index1 in zip(j1, j2)]

        elif i >= 3:
            # lst_softmaxed -> paper's equation-(6)
            j1 = [z * m1 for z in lst_softmaxed[i - 1]]
            j2 = [z * m2 for z in lst_softmaxed[i - 2]]
            j3 = [z * m3 for z in lst_softmaxed[i - 3]]
            j4 = [sum(index1) for index1 in zip(j1, j2, j3)]

        tl1 = entropy(lst_softmaxed[i], j4)     # j4 = m1*P1 + m2*P2 + m3*P3 (Equation-6)
        tl2 = entropy(j4, lst_softmaxed[i])     # KL is implemented in entropy(q,p) function.
        cur_window_f_w = tl1 + tl2                         # f(w), equation-6's result.

        # lst_anomaly_scores_T[i] -> f(w)
        if b_start_timer and not b_anomaly_detected and 3 >= reset_wait_counter > 0:
            cur_window_f_w = 0                                  # f(w)
            cur_window_moving_avg = 0                           # µ(w)
            cur_window_std_dev = 0                              # σ(w)
            lst_anomaly_scores_T.append(cur_window_f_w)
            lst_mvavg.append(cur_window_moving_avg)
            lst_std.append(cur_window_std_dev)

        else:
            lst_anomaly_scores_T.append(cur_window_f_w)         # f(w)

            # Calculate µ (mean) for current window.  µ(w_i): for i=0, µ(w_i)=0
            #cur_window_moving_avg = (gamma * lst_mvavg[i - 4]) + ((1 - gamma) * lst_anomaly_scores_T[i - 4])
            if i == 0:
                cur_window_moving_avg = 0
            else:
                cur_window_moving_avg = (gamma * lst_mvavg[i - 1]) + ((1 - gamma) * cur_window_f_w)
            lst_mvavg.append(cur_window_moving_avg)

            # Calculate σ (Standard Deviation) for current window.  σ(w_i): for i=0, σ(w_i)=0
            if i == 0:
                cur_window_std_dev = 0
            else:
                cur_window_std_dev = np.sqrt(
                    (gamma * (lst_std[i - 1] ** 2)) +
                    ((1 - gamma) * ((cur_window_f_w - cur_window_moving_avg)**2)))
            lst_std.append(cur_window_std_dev)

        # lst_anomaly_runningavg -> ψ -> paper's Equation-8: MU_{w-1} + alpha*sigma{w-1}
        if i == 0:
            cur_window_psi = 0
        else:
            cur_window_psi = lst_mvavg[i - 1] + (alpha * lst_std[i - 1])        # ψ(w), based on (w-1)
        lst_anomaly_runningavg.append(cur_window_psi)

        lst_delta.append(cur_window_f_w - cur_window_psi)             # ∆(w_i) = f(w_i) - ψ(w_i)

##################################################################################################
####### This section only changes BOOLEAN values that affect the next iteration ##################
        if lst_delta[-1] > epsilon and not b_anomaly_detected:
            b_anomaly_detected = True
            # reset_wait_counter += 1

        # We are in ANOMALY REGION, check for leaving ANOMALY
        elif lst_delta[-1] > epsilon and b_anomaly_detected:
            # do nothing
            continue
        # Going back below epsilon threshold,
        # change the boolean(detected) to false,
        # start the counter (reset_wait_counter)
        elif lst_delta[-1] <= epsilon and b_anomaly_detected:
            b_anomaly_detected = False
            b_start_timer = True

        if b_start_timer and reset_wait_counter < 3:
            reset_wait_counter += 1
        elif b_start_timer and reset_wait_counter == 3:
            b_start_timer = False
            reset_wait_counter = 0
##################################################################################################

    # Generates index of every data-point, that has label: 1 (index of each abnormal data-point)
    lst_original_anomaly_indices = getIndicesOfAbnormalDataPoints(lst_labels)

    # This will return a list of indices that our method thinks is anomalous
    lst_calculated_anomaly_indices = get_indices_of_calculated_anomalies(lst_delta, lst_window_start_end_indices,
                                                                         greenwich, time_window_shift,
                                                                         time_window_in_seconds, epsilon)

    # Given original anomaly indices, compare to our method's calculated anomaly indices and find tp/fp/tn/fn results
    results = calculate_tpn_fprn_tnrn_fnn(lst_original_anomaly_indices, lst_calculated_anomaly_indices,
                                lst_window_start_end_indices[0][0], lst_window_start_end_indices[-1][1])

    # From Dr. Issa's anomaly detection paper:
    # False Positive Rate (FPR) , Detection Rate (DR)
    # DR is same as Recall
    # FPR = 100 * ( sum(FP) / sum(FP+TN) )
    # DR  = 100 * ( sum(TP) / sum(TP+FN) )

    # results = (true_positives, false_positives, true_negatives, false_negatives)
    TP = results[0]
    FP = results[1]
    TN = results[2]
    FN = results[3]
    #print(f"[+] New Results: nTP: {TP}, nFP: {FP}, nTN: {TN}, nFN: {FN}")

    precision = recall = FPR = 0
    # Precision, Positive Predictive Value (PPV)
    # precision = TP / (TP + FP)
    if (float(TP) + float(FP)) != 0:
        precision = float(TP) / (float(TP) + float(FP))
    else:
        precision = 0

    # TRUE POSITIVE RATE, Recall, Sensitivity, Hit Rate, Detection Rate.
    # Recall = TP / (TP + FN)
    if (float(TP) + float(FN)) != 0:
        recall = TP / (float(TP) + float(FN))
    else:
        recall = 0

    # FALSE POSITIVE RATE == (Fall-out, False Alarm Ratio)
    if (float(FP) + float(TN)) != 0:
        FPR = float(FP) / (float(FP) + float(TN))
    else:
        FPR = 0

    nab = False

    filename = dataFile.split("/")[-1]
    # RANDOM ARTIFICIAL DATA
    if not nab:
        #print("[+] FileName: %s, #DataPoints: %d, #OriginalAnomalies:%d, TP: %.4f, FP: %.4f, TN: %.4f, FN: %.4f, " \
        #      "recall: %.4f, precision: %.4f, FPR: %.4f\n" % \
        #      (filename, len(lst_time), len(lst_original_anomaly_indices), TP, FP, TN, FN, recall, precision, FPR))
        return TP, FP, TN, FN, recall, precision, FPR

    plt.close()
    #plt.clf()
    fig = plt.figure(figsize=(12.8, 9.6))
    plt.subplot(3, 1, 1)
    plt.xlabel("Sliding Time Window")
    plt.ylabel("Anomaly Score")
    plt.title("Anomaly Score Graph\n#Windows: %d, window: %d sec, "
              "win_slide: %d sec, m1: %.2f, m2: %.2f, m3: %.2f, "
              "alpha: %.2f, gamma: %.2f, epsilon: %.2f" %
              ((len(lst_anomaly_scores_T) + 3), time_window_in_seconds, time_window_shift, m1, m2, m3, alpha, gamma, epsilon))
    plt.grid(True)
    plt.plot(lst_anomaly_scores_T, 'b', label='f(w)')  # f(w)
    plt.plot(lst_anomaly_runningavg, 'r', label=r"$(\mu_{w-1} + \alpha \sigma_{w-1})$")  # nu_{w-1} + alpha*sigma{w-1}
    plt.legend(loc='upper left')
    plt.subplot(3, 1, 2)
    # plt.xlabel("Sliding Time Window")
    plt.ylabel(r"$f(w) - \mu_{w-1} + \alpha \sigma_{w-1}$")
    plt.plot(lst_delta, 'g', label="Delta")  # delta, difference between f(w) and moving averages
    plt.plot(epsilon * np.ones(len(lst_delta)), 'y', label="Epsilon")
    plt.legend(loc='upper left')

    plt.subplot(3, 1, 3)
    plt.xlabel("Time")
    plt.ylabel(r"Synthetic Data - CPU Usage")
    plt.title("File: %s, DataPoints: %d, DetectRate(Recall): %.4f, correct_detection_count: %d, "
              "total_number_of_anomalies: %d" %
              (filename, len(lst_time), detection_rate, correct_detection_counter, lst_original_anomaly_indices.size))
    # plt.plot(lst_time_series_a, 'g', label="CPU")
    plt.plot(lst_time, lst_time_series_a, 'bo', label='CPU_Artificial_data')
    plt.plot(lst_time, 2 * np.ones(len(lst_time_series_a)), 'r-', label="Lower Bound on Anomalous")
    plt.legend(loc='upper left')

    '''
        Uncomment below to SAVE THE FIGURE in file
    '''
    pathtostats = "/".join(dataFile.split("/")[:-2])
    pathtostats = "/".join(dataFile.split("/")[:-7]) + "/CPU_usage_plots/SyntheticCPU/anomalies/JointTimeSeries2DMatrix"

    # addition for SYNTHETIC CPU values
    imagefilename = (pathtostats + "/anomaly_score_%s.png") % filename
    plt.savefig(imagefilename, dpi=1000, bbox_inches='tight')
    plt.close(fig)

    '''
        Plotting confusion matrix for each file.
    '''
    # Try to plot confusion matrix for each of the NAB file's result
    np.set_printoptions(precision=2)

    util.plot_confusion_matrix(TP=TP, FN=FN, FP=FP, TN=TN,
                               title='TP: %d, FN: %d, FP: %d, TN: %d, %s' %
                                     (int(TP), int(FN), int(FP), int(TN), filename), normalize=False)

    plt.savefig((pathtostats+"/ConfMatrix_%s.png") % filename, format='png')

    # Plot normalized confusion matrix
    # plot_confusion_matrix(y_test, y_pred, classes=class_names, normalize=True,
    #                      title='Normalized confusion matrix')
    #plt.show()

    # print("[+] Size of Anomaly_Scores: %d" % (len(anomaly_scores)))
    print("[+] Finished with file: %s, \n======\n" % filename)
Example #41
def lwip_test_suite(env, extra_data):
    """
    steps: |
      1. Rebuild test suite with esp32_netsuite.ttcn
      2. Start listeners on stdout and socket
      3. Execute ttcn3 test suite
      4. Collect results from ttcn3
    """
    global stop_io_listener
    global stop_sock_listener
    dut1 = env.get_dut("net_suite", "examples/system/network_tests")
    # check and log bin size
    binary_file = os.path.join(dut1.app.binary_path, "net_suite.bin")
    bin_size = os.path.getsize(binary_file)
    IDF.log_performance("net_suite", "{}KB".format(bin_size // 1024))
    IDF.check_performance("net_suite", bin_size // 1024)
    dut1.start_app()
    thread1 = Thread(target=sock_listener, args=(dut1, ))
    thread2 = Thread(target=io_listener, args=(dut1, ))
    if not manual_test:
        # Variables referring to esp32 ttcn test suite
        TTCN_SRC = 'esp32_netsuite.ttcn'
        TTCN_CFG = 'esp32_netsuite.cfg'
        # System Paths
        netsuite_path = os.getenv("NETSUITE_PATH")
        netsuite_src_path = os.path.join(netsuite_path, "src")
        test_dir = os.path.dirname(os.path.realpath(__file__))
        # Building the suite
        print("Rebuilding the test suite")
        print("-------------------------")
        # copy esp32 specific files to ttcn net-suite dir
        copyfile(os.path.join(test_dir, TTCN_SRC),
                 os.path.join(netsuite_src_path, TTCN_SRC))
        copyfile(os.path.join(test_dir, TTCN_CFG),
                 os.path.join(netsuite_src_path, TTCN_CFG))
        proc = subprocess.Popen(
            ['bash', '-c', 'cd ' + netsuite_src_path + ' && source make.sh'],
            cwd=netsuite_path,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        output = proc.stdout.read()
        print(
            "Note: First build step we expect failure (titan/net_suite build system not suitable for multijob make)"
        )
        print(output)
        proc = subprocess.Popen(
            ['bash', '-c', 'cd ' + netsuite_src_path + ' && make'],
            cwd=netsuite_path,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        print(
            "Note: This time all dependencies shall be generated -- multijob make shall pass"
        )
        output = proc.stdout.read()
        print(output)
        # Executing the test suite
        thread1.start()
        thread2.start()
        time.sleep(2)
        print("Executing the test suite")
        print("------------------------")
        proc = subprocess.Popen([
            'ttcn3_start',
            os.path.join(netsuite_src_path, 'test_suite'),
            os.path.join(netsuite_src_path, TTCN_CFG)
        ],
                                stdout=subprocess.PIPE)
        output = proc.stdout.read()
        print(output)
        print("Collecting results")
        print("------------------")
        # proc.stdout.read() returns bytes, so match with byte patterns
        verdict_stats = re.search(b'(Verdict statistics:.*)', output)
        if verdict_stats:
            verdict_stats = verdict_stats.group(1)
        else:
            verdict_stats = b""
        verdict = re.search(b'Overall verdict: pass', output)
        if verdict:
            print("Test passed!")
            Utility.console_log(verdict_stats, "green")
        else:
            Utility.console_log(verdict_stats, "red")
            raise ValueError('Test failed with: {}'.format(verdict_stats))
    else:
        try:
            # Executing the test suite
            thread1.start()
            thread2.start()
            time.sleep(2)
            while True:
                time.sleep(0.5)
        except KeyboardInterrupt:
            pass
    print("Executing done, waiting for tests to finish")
    print("-------------------------------------------")
    stop_io_listener.set()
    stop_sock_listener.set()
    thread1.join()
    thread2.join()
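Since `proc.stdout.read()` returns bytes under Python 3, the verdict parsing above has to match byte patterns or decode first. A small hedged sketch of the same parsing on decoded output (the helper name is illustrative):

import re

def parse_ttcn3_verdict(output: bytes):
    """Pull the verdict statistics line and the overall pass/fail flag out of
    ttcn3_start output (format as printed in the listing above)."""
    text = output.decode('utf-8', errors='replace')
    stats = re.search(r'(Verdict statistics:.*)', text)
    passed = re.search(r'Overall verdict: pass', text) is not None
    return (stats.group(1) if stats else ''), passed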
Example #42
    def __init__(self):
        self.reserved_stems_list = Utility.load_words(
            './DATA/reserved_word_list.txt')
        self.small_stems_list = Utility.load_words(
            './DATA/small_words_list.txt')
Example #43
    def play_game(self):
        """ Play the 'is multiple' game """
        status = "incomplete"
        game_status = None

        while status == "incomplete":
            try:
                # Get two integers from user
                first_num = int(input("Type in first integer value: "))
                str_first_num = str(first_num)
                second_num = int(
                    input("Type in a possible multiple of '" + str(first_num) +
                          "' : "))
                # WARNING - log that a possible ValueError may occur
                self.game_logger.doLog(
                    "warn",
                    "python-logstash: Got user input, may be null values.")

                print(
                    "++++++++++++++++++++++++++ RESULT +++++++++++++++++++++++++++++++"
                )

                # Check if second number is multiple of first
                if self.is_multiple(first_num, second_num):
                    print("{0} is a multiple of {1}.".format(
                        second_num, first_num))
                    game_status = True
                else:
                    print("{0} is not a multiple of {1}.".format(
                        second_num, first_num))
                    game_status = False

                print(
                    "*****************************************************************"
                )

                # Add extra fields to logstash msg
                extra = self.game_stats(first_num, second_num, game_status)
                # Log msg plus extra fields.
                self.game_logger.doLog(
                    "info", "python-logstash: game completed successfully.",
                    extra)

                status = "complete"
            except ValueError:
                # If non-numeric value is typed, show error message
                util.error_display("ERROR: Please type in numeric input(s).")
                # TODO: ERROR Log Value Error
                self.game_logger.doLog("exception",
                                       "python-logstash: ValueError occurred")
            finally:
                try:
                    play_again = int(
                        input(
                            "Play again? (type '1' for yes or '2' for No.): "))
                    if play_again == 1:
                        util.game_header()
                        util.game_instructions()
                        self.play_game()
                    else:
                        status = "complete"
                except NameError:
                    self.game_logger.doLog(
                        "error",
                        "python-logstash: NameError occurred in try again area."
                    )
                    status = "complete"
Example #44
def calculate_dist_threshold(dist_list):
    dist_list.sort(reverse=True)
    # max_dist = max(dist_list)
    kernel = Utility.generate_gaus_kernel(4)
    smoothed_dist_list = Utility.calculate_c
Example #45
        return dscores


if __name__ == '__main__':
    # path to the saved learned parameter
    learn_data = 'result/SoftMax1/cifar_10'

    D = 3072  # dimensionality
    K = 10  # number of classes

    # Neural Network
    nn = SoftMax(D, K)

    # load the CIFAR10 data
    X, y, X_test, y_test = util.load_CIFAR10('data/')

    # Train the Neural Network
    if util.file_exist(learn_data):
        nn_parameter = util.unpickle(learn_data)
    else:
        nn_parameter = nn.training(X, y)

        util.pickle_nn(learn_data, nn_parameter)

    # Test the Neural Network
    predicted_labels = nn.predict(X_test, y_test, nn_parameter)

    # Save the predictions to label
    util.save_predicted_labels('result/SoftMax1/submission.csv', predicted_labels)
Example #46
def cb_smot(points, min_time, area):
    eps = caculate_eps(points, area)
    # eps = 100
    surround_points = defaultdict(list)
    # Compute each data point's neighboring points; the neighborhood's cumulative distance is bounded by Eps
    for i in range(len(points)):
        sum_distance = 0
        idx = i - 1
        surround_points[i].append(i)
        while idx >= 0 and sum_distance <= eps:
            surround_points[i].append(idx)
            sum_distance += Utility.distance_calculate(points[idx], points[i])
            idx -= 1
        sum_distance = 0
        idx = i + 1
        while idx < len(points) and sum_distance <= eps:
            surround_points[i].append(idx)
            sum_distance += Utility.distance_calculate(points[idx], points[i])
            idx += 1
    # A point is a core point when its neighborhood spans at least min_time (the DBSCAN MinPts analogue)
    core_point_idx = [
        pointIdx for pointIdx, surPointIdxs in surround_points.iteritems()
        if (points[max(surPointIdxs)].time -
            points[min(surPointIdxs)].time) >= min_time
    ]

    # Non-core points whose neighborhood contains a core point are border points
    border_point_idx = []
    for point_idx, sur_points_idx in surround_points.iteritems():
        if point_idx not in core_point_idx:
            for idx in sur_points_idx:
                if idx in core_point_idx:
                    if point_idx not in border_point_idx:
                        border_point_idx.append(point_idx)
                    break

    # Noise points are neither border points nor core points
    noise_point_idx = [
        point_idx for point_idx in range(len(points))
        if point_idx not in core_point_idx
        and point_idx not in border_point_idx
    ]

    groups = [idx for idx in range(len(points))]
    # Put each core point in the same cluster as every core point in its neighborhood
    for pointidx, surroundIdxs in surround_points.iteritems():
        for oneSurroundIdx in surroundIdxs:
            if pointidx in core_point_idx and oneSurroundIdx in core_point_idx and pointidx < oneSurroundIdx:
                for idx in range(len(groups)):
                    if groups[idx] == groups[oneSurroundIdx]:
                        groups[idx] = groups[pointidx]
    # Put each border point in the same cluster as one core point in its neighborhood
    for pointidx, surroundIdxs in surround_points.iteritems():
        for oneSurroundIdx in surroundIdxs:
            if pointidx in border_point_idx and oneSurroundIdx in core_point_idx:
                groups[pointidx] = groups[oneSurroundIdx]
                break
    # Collect all the groups into clusters
    clusters = defaultdict(list)
    for i, group_idx in enumerate(groups):
        if i not in noise_point_idx:
            clusters[group_idx].append(points[i])
    return clusters
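The function above is a CB-SMoT-style variant of DBSCAN: neighborhoods grow along the trajectory while the accumulated distance to the anchor point stays within Eps, and a point is core when its neighborhood spans at least min_time. A compact Python 3 sketch of just the neighborhood step (names and the toy call are illustrative):

from collections import defaultdict

def trajectory_neighborhoods(points, eps, dist):
    """Grow each point's neighborhood backward and forward along the
    trajectory while the running sum of distances to the anchor stays <= eps."""
    neighborhoods = defaultdict(list)
    for i in range(len(points)):
        neighborhoods[i].append(i)
        for step in (-1, 1):               # walk backward, then forward
            total, j = 0.0, i + step
            while 0 <= j < len(points) and total <= eps:
                neighborhoods[i].append(j)
                total += dist(points[j], points[i])
                j += step
    return neighborhoods

# e.g. with 1-D "points" and absolute distance:
print(trajectory_neighborhoods([0.0, 1.0, 2.0, 5.0], eps=2.0,
                               dist=lambda a, b: abs(a - b)))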
Example #47
import Utility as ut
x = int(input("Enter the x position"))
y = int(input("Enter the y position"))
a = ut.euclidean(x, y)
Example #48
    def __init__(self):

        super(MainWindow, self).__init__()

        # Set up the user interface from Designer.
        self.setupUi(self)

        # Center main application window
        util.center_window(self)

        # Disable resizing
        self.setFixedSize(self.size())

        # TODO: init editor with no file here?
        # Create the fuel map editor variable
        self._fl_map_editor = None
        self._ign_pt_editor = None
        self._visualization = None

        # Disable export of files until one is loaded
        self.action_export_fuel_map.setEnabled(False)
        self.action_export_dem.setEnabled(False)
        self.action_export_summary_file.setEnabled(False)
        self.action_export_environment.setEnabled(False)
        self.action_create_environment.setEnabled(False)

        self._sim_settings_tab.setEnabled(False)
        self._fl_type_lgnd_tab.setEnabled(False)
        self.ignition_point_legend_tab.setEnabled(False)

        # Hide and reset progress bar
        self.__hide_and_reset_progress()

        # TODO: we might not need this since we check on button press
        # Setup validation for fuel map editor inputs
        # self._x_rng_min_fl_line_edit.returnPressed.connect(self.__x_rng_ret_pressed())
        # self._x_rng_max_fl_line_edit.returnPressed.connect(self.__x_rng_ret_pressed())

        # self._y_rng_min_fl_line_edit.returnPressed.connect(self.__y_rng_ret_pressed())
        # self._y_rng_max_fl_line_edit.returnPressed.connect(self.__y_rng_ret_pressed())

        # Initialize fds object
        self._fds = Fds()

        # Initialize smv_file to be None
        self._smv_file = None

        # Setup and hide the fuel type legend grid
        self._fl_type_grid_layout_widget = QWidget(self)
        self._fl_type_grid_layout = QGridLayout(self._fl_type_grid_layout_widget)

        # HIDE THIS or it will cause problems with the GUI (can't click on part of the menu bar)
        self._fl_type_grid_layout_widget.hide()

        # Setup and hide the ignition point type legend grid
        self._ign_pt_type_grid_layout_widget = QWidget(self)
        self._ign_pt_type_grid_layout = QGridLayout(self._ign_pt_type_grid_layout_widget)
        self._ign_pt_type_grid_layout_widget.hide()

        # TODO: make use of this variable
        # Initialize selected output file types
        self._output_file_types = []

        # Set tab widget to sim settings tab
        self._tab_widget.setCurrentIndex(0)

        self._tab_widget.currentChanged.connect(self.__tab_changed)

        self.modify_fuel_map_button.clicked.connect(self.__modify_fuel_map)
        self.modify_ign_pts_button.clicked.connect(self.__modify_ignition_map)

        for child in self._menu_bar.children():
            if type(child) is QtWidgets.QMenu:
                for action in child.actions():
                    # Use objectName as identifier so as to ensure uniqueness of identifier
                    identifier = action.objectName()
                    action.triggered.connect(lambda state, x=identifier: self.__handle_button(x))
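The `lambda state, x=identifier:` in the last loop is the standard fix for late-binding closures: without the default argument, every connected lambda would see the final value of `identifier`. A standalone illustration, independent of Qt:

handlers = []
for name in ("open", "save", "quit"):
    # Without the default argument, every lambda would look `name` up at call
    # time, and all three would print "quit".
    handlers.append(lambda state=None, x=name: print("clicked:", x))

for handler in handlers:
    handler()   # clicked: open / clicked: save / clicked: quit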
Example #49
def detect_update_unit_test_info(env, extra_data, app_bin):

    case_config = format_test_case_config(extra_data)

    for ut_config in case_config:
        dut = env.get_dut("unit-test-app", app_path=ut_config)
        replace_app_bin(dut, "unit-test-app", app_bin)
        dut.start_app()

        reset_dut(dut)

        # get the list of test cases
        dut.write("")
        dut.expect("Here's the test menu, pick your combo:",
                   timeout=DEFAULT_TIMEOUT)

        def find_update_dic(name, _t, _timeout, child_case_num=None):
            for _case_data in extra_data:
                if _case_data['name'] == name:
                    _case_data['type'] = _t
                    if 'timeout' not in _case_data:
                        _case_data['timeout'] = _timeout
                    if child_case_num:
                        _case_data['child case num'] = child_case_num

        try:
            while True:
                data = dut.expect(TEST_PATTERN, timeout=DEFAULT_TIMEOUT)
                test_case_name = data[1]
                m = re.search(r'\[timeout=(\d+)\]', data[2])
                if m:
                    timeout = int(m.group(1))
                else:
                    timeout = 30
                m = re.search(r'\[multi_stage\]', data[2])
                if m:
                    test_case_type = MULTI_STAGE_ID
                else:
                    m = re.search(r'\[multi_device\]', data[2])
                    if m:
                        test_case_type = MULTI_DEVICE_ID
                    else:
                        test_case_type = SIMPLE_TEST_ID
                        find_update_dic(test_case_name, test_case_type,
                                        timeout)
                        if data[3] and re.search(END_LIST_STR, data[3]):
                            break
                        continue
                # find the last submenu item
                data = dut.expect(TEST_SUBMENU_PATTERN,
                                  timeout=DEFAULT_TIMEOUT)
                find_update_dic(test_case_name,
                                test_case_type,
                                timeout,
                                child_case_num=int(data[0]))
                if data[1] and re.search(END_LIST_STR, data[1]):
                    break
            # check if the unit test case names are correct, i.e. they could be found in the device
            for _dic in extra_data:
                if 'type' not in _dic:
                    raise ValueError(
                        "Unit test \"{}\" doesn't exist in the flashed device!"
                        .format(_dic.get('name')))
        except ExpectTimeout:
            Utility.console_log("Timeout during getting the test list",
                                color="red")
        finally:
            dut.close()

        # These options are the same for all configs, therefore there is no need to continue
        break
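The menu lines matched by `TEST_PATTERN` carry bracketed tags in `data[2]`, which the loop above turns into each case's type and timeout. A quick illustration of that tag parsing with a made-up tag string:

import re

tags = '[leaks][timeout=60][multi_stage]'   # hypothetical data[2] contents
m = re.search(r'\[timeout=(\d+)\]', tags)
timeout = int(m.group(1)) if m else 30      # 30 is the default used above
is_multi_stage = re.search(r'\[multi_stage\]', tags) is not None
print(timeout, is_multi_stage)              # -> 60 True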
Example #50
    logger.setLogDir()
    job.setLogger(logger)

    # invalidate hive table
    job.validateTbls(confTbl, jobTbl)

    # get cron job info
    data = csv.DictReader(open(filename))
    records = []

    for row in data:
        pName = row[search_col]
        freq = row['Frequency']
        time = row['Start Time UTC']

        print(pName, time)

        # find & create job
        jobId = job.findJobId(confTbl, jobTbl, pName)
        print("job id: ", jobId)
        if jobId == 0:
            print("WARNING: Job can't be found.")
        else:
            record = CronRecord(jobId, time, freq)
            records.append(record)

    filename = "croninfo"
    job.writeToFile(filename, records)

    util.script_exit(True)
Example #51
    def run(self):

        self.dut.start_capture_raw_data()

        def get_child_case_name(data):
            self.child_case_name = data[0]
            time.sleep(1)
            self.dut.write(str(self.child_case_index))

        def one_device_case_finish(result):
            """ one test finished, let expect loop break and log result """
            self.finish = True
            self.result = result
            self.output = "[{}]\n\n{}\n".format(
                self.child_case_name, self.dut.stop_capture_raw_data())
            if not result:
                self.fail_name = self.child_case_name

        def device_wait_action(data):
            start_time = time.time()
            expected_signal = data[0]
            while 1:
                if time.time() > start_time + self.timeout:
                    Utility.console_log("Timeout in device for function: %s" %
                                        self.child_case_name,
                                        color="orange")
                    break
                with self.lock:
                    for sent_signal in self.sent_signal_list:
                        if expected_signal == sent_signal["name"]:
                            self.dut.write(sent_signal["parameter"])
                            self.sent_signal_list.remove(sent_signal)
                            break
                    else:
                        time.sleep(0.01)
                        continue
                    break

        def device_send_action(data):
            with self.lock:
                self.sent_signal_list.append({
                    "name":
                    data[0].encode('utf-8'),
                    "parameter":
                    "" if data[2] is None else data[2].encode('utf-8')
                    # no parameter means we only write EOL to DUT
                })

        def handle_device_test_finish(data):
            """ test finished without reset """
            # in this scenario reset should not happen
            if int(data[1]):
                # case ignored
                Utility.console_log("Ignored: " + self.child_case_name,
                                    color="orange")
            one_device_case_finish(not int(data[0]))

        try:
            time.sleep(1)
            self.dut.write("\"{}\"".format(self.parent_case_name))
            self.dut.expect("Running " + self.parent_case_name + "...")
        except ExpectTimeout:
            Utility.console_log("No case detected!", color="orange")
        while not self.finish and not self.force_stop.isSet():
            try:
                self.dut.expect_any(
                    (
                        re.compile('\(' + str(self.child_case_index) +
                                   '\)\s"(\w+)"'),  # noqa: W605 - regex
                        get_child_case_name),
                    (self.WAIT_SIGNAL_PATTERN,
                     device_wait_action),  # wait signal pattern
                    (self.SEND_SIGNAL_PATTERN,
                     device_send_action),  # send signal pattern
                    (self.FINISH_PATTERN,
                     handle_device_test_finish),  # test finish pattern
                    timeout=self.timeout)
            except ExpectTimeout:
                Utility.console_log("Timeout in expect", color="orange")
                one_device_case_finish(False)
                break
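`device_wait_action` and `device_send_action` synchronize multiple DUT threads through a shared `sent_signal_list` guarded by a lock: a waiting device polls until another device's send callback has appended the expected signal name. A minimal sketch of that handshake with plain threads (all names illustrative):

import threading
import time

lock = threading.Lock()
sent_signal_list = []   # shared between device threads

def device_send(name):
    with lock:
        sent_signal_list.append(name)

def device_wait(name, timeout=5.0, poll=0.01):
    """Poll the shared list until `name` shows up or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        with lock:
            if name in sent_signal_list:
                sent_signal_list.remove(name)
                return True
        time.sleep(poll)
    return False

threading.Timer(0.1, device_send, args=("SYNC",)).start()
print(device_wait("SYNC"))   # True once the sender thread has fired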
Example #52
def test_unit_test_case(env, extra_data):
    """
    extra_data can be one of three types of value
    1. as string:
               1. "case_name"
               2. "case_name [reset=RESET_REASON]"
    2. as dict:
               1. with key like {"name": "Intr_alloc test, shared ints"}
               2. with key like {"name": "restart from PRO CPU", "reset": "SW_CPU_RESET", "config": "psram"}
    3. as list of string or dict:
               [case1, case2, case3, {"name": "restart from PRO CPU", "reset": "SW_CPU_RESET"}, ...]

    :param extra_data: the case name or case list or case dictionary
    :return: None
    """

    case_config = format_test_case_config(extra_data)

    # compile the patterns for expect only once
    reset_pattern = re.compile(
        r"(ets [\w]{3}\s+[\d]{1,2} [\d]{4} [\d]{2}:[\d]{2}:[\d]{2}[^()]*\([\w].*?\))"
    )
    exception_pattern = re.compile(
        r"(Guru Meditation Error: Core\s+\d panic'ed \([\w].*?\))")
    abort_pattern = re.compile(
        r"(abort\(\) was called at PC 0x[a-eA-E\d]{8} on core \d)")
    finish_pattern = re.compile(r"1 Tests (\d) Failures (\d) Ignored")

    # we don't want to stop on a failed case (unless it's a special scenario we can't handle);
    # this list records any case that fails during execution and is used to
    # report the failures before the test function exits
    failed_cases = []

    for ut_config in case_config:
        dut = env.get_dut("unit-test-app", app_path=ut_config)
        dut.start_app()

        for one_case in case_config[ut_config]:
            dut.reset()
            # the esptool ``run`` cmd takes quite a long time, and the serial port
            # stays closed until the reset finishes, so the DUT may already have
            # booted before the port reopens; that would make a bootup-print check fail.
            # Instead we send the cmd `-` and accept either the bootup print or the
            # test history to determine whether the DUT is ready to test.
            dut.write("-", flush=False)
            dut.expect_any(UT_APP_BOOT_UP_DONE, "0 Tests 0 Failures 0 Ignored")

            # run test case
            dut.write("\"{}\"".format(one_case["name"]))
            dut.expect("Running " + one_case["name"] + "...")

            exception_reset_list = []

            # we want to set this flag in callbacks (inner functions)
            # use list here so we can use append to set this flag
            test_finish = list()

            # expect callbacks
            def one_case_finish(result):
                """ one test finished, let expect loop break and log result """
                test_finish.append(True)
                if result:
                    Utility.console_log("Success: " + one_case["name"],
                                        color="green")
                else:
                    failed_cases.append(one_case["name"])
                    Utility.console_log("Failed: " + one_case["name"],
                                        color="red")

            def handle_exception_reset(data):
                """
                just append data to exception list.
                exception list will be checked in ``handle_reset_finish``, once reset finished.
                """
                exception_reset_list.append(data[0])

            def handle_test_finish(data):
                """ test finished without reset """
                # in this scenario reset should not happen
                assert not exception_reset_list
                if int(data[1]):
                    # case ignored
                    Utility.console_log("Ignored: " + one_case["name"],
                                        color="orange")
                one_case_finish(not int(data[0]))

            def handle_reset_finish(data):
                """ reset happened and reboot finished """
                assert exception_reset_list  # reboot but no exception/reset logged. should never happen
                result = False
                if len(one_case["reset"]) == len(exception_reset_list):
                    for i, exception in enumerate(exception_reset_list):
                        if one_case["reset"][i] not in exception:
                            break
                    else:
                        result = True
                if not result:
                    Utility.console_log(
                        """Reset Check Failed: \r\n\tExpected: {}\r\n\tGet: {}"""
                        .format(one_case["reset"], exception_reset_list),
                        color="orange")
                one_case_finish(result)

            while not test_finish:
                try:
                    dut.expect_any(
                        (reset_pattern,
                         handle_exception_reset),  # reset pattern
                        (exception_pattern,
                         handle_exception_reset),  # exception pattern
                        (abort_pattern,
                         handle_exception_reset),  # abort pattern
                        (finish_pattern,
                         handle_test_finish),  # test finish pattern
                        (UT_APP_BOOT_UP_DONE,
                         handle_reset_finish),  # reboot finish pattern
                        timeout=UT_TIMEOUT)
                except ExpectTimeout:
                    Utility.console_log("Timeout in expect", color="orange")
                    one_case_finish(False)
                    break

    # raise exception if any case fails
    if failed_cases:
        Utility.console_log("Failed Cases:", color="red")
        for _case_name in failed_cases:
            Utility.console_log("\t" + _case_name, color="red")
        raise AssertionError("Unit Test Failed")
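Concretely, the three accepted shapes of `extra_data` from the docstring, written out as Python literals:

# 1. a single case name, optionally with a reset annotation
extra_data = "case_name"
extra_data = "case_name [reset=RESET_REASON]"

# 2. a dict with at least a "name" key
extra_data = {"name": "restart from PRO CPU",
              "reset": "SW_CPU_RESET",
              "config": "psram"}

# 3. a list mixing both forms
extra_data = ["case1", "case2",
              {"name": "restart from PRO CPU", "reset": "SW_CPU_RESET"}]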
Example #53
                raise ValueError('Error in argument item {} of {}'.format(
                    test_item, test))
        test_dict['app_bin'] = args.app_bin
        list_of_dicts.append(test_dict)

    TinyFW.set_default_config(env_config_file=args.env_config_file)

    env_config = TinyFW.get_default_config()
    env_config['app'] = UT
    env_config['dut'] = IDF.IDFDUT
    env_config['test_suite_name'] = 'unit_test_parsing'
    test_env = Env.Env(**env_config)
    detect_update_unit_test_info(test_env,
                                 extra_data=list_of_dicts,
                                 app_bin=args.app_bin)

    for index in range(1, args.repeat + 1):
        if args.repeat > 1:
            Utility.console_log("Repetition {}".format(index), color="green")
        for dic in list_of_dicts:
            t = dic.get('type', SIMPLE_TEST_ID)
            if t == SIMPLE_TEST_ID:
                run_unit_test_cases(extra_data=dic)
            elif t == MULTI_STAGE_ID:
                run_multiple_stage_cases(extra_data=dic)
            elif t == MULTI_DEVICE_ID:
                run_multiple_devices_cases(extra_data=dic)
            else:
                raise ValueError('Unknown type {} of {}'.format(
                    t, dic.get('name')))
Example #54
    def run(self):
        def get_child_case_name(data):
            self.child_case_name = data[0]
            time.sleep(1)
            self.dut.write(str(self.child_case_index))

        def one_device_case_finish(result):
            """ one test finished, let expect loop break and log result """
            self.finish = True
            self.result = result
            if not result:
                self.fail_name = self.child_case_name

        def device_wait_action(data):
            start_time = time.time()
            expected_signal = data[0]
            while 1:
                if time.time() > start_time + self.timeout:
                    Utility.console_log("Timeout in device for function: %s" %
                                        self.child_case_name,
                                        color="orange")
                    break
                with self.lock:
                    if expected_signal in self.sent_signal_list:
                        self.dut.write(" ")
                        self.sent_signal_list.remove(expected_signal)
                        break
                time.sleep(0.01)

        def device_send_action(data):
            with self.lock:
                self.sent_signal_list.append(data[0].encode('utf-8'))

        def handle_device_test_finish(data):
            """ test finished without reset """
            # in this scenario reset should not happen
            if int(data[1]):
                # case ignored
                Utility.console_log("Ignored: " + self.child_case_name,
                                    color="orange")
            one_device_case_finish(not int(data[0]))

        self.dut.reset()
        self.dut.write("-", flush=False)
        self.dut.expect_any(UT_APP_BOOT_UP_DONE,
                            "0 Tests 0 Failures 0 Ignored")
        time.sleep(1)
        self.dut.write("\"{}\"".format(self.parent_case_name))
        self.dut.expect("Running " + self.parent_case_name + "...")

        while not self.finish:
            try:
                self.dut.expect_any(
                    (re.compile('\(' + str(self.child_case_index) +
                                '\)\s"(\w+)"'), get_child_case_name),
                    (self.WAIT_SIGNAL_PATTERN,
                     device_wait_action),  # wait signal pattern
                    (self.SEND_SIGNAL_PATTERN,
                     device_send_action),  # send signal pattern
                    (self.FINISH_PATTERN,
                     handle_device_test_finish),  # test finish pattern
                    timeout=UT_TIMEOUT)
            except ExpectTimeout:
                Utility.console_log("Timeout in expect", color="orange")
                one_device_case_finish(False)
                break
Example #55
def run_multiple_stage_cases(env, extra_data):
    """
    extra_data can be 2 types of value
    1. as dict: mandatory keys: "name" and "child case num", optional keys: "reset" and others
    2. as list of string or dict:
               [case1, case2, case3, {"name": "restart from PRO CPU", "child case num": 2}, ...]

    :param extra_data: the case name or case list or case dictionary
    :return: None
    """

    case_config = format_test_case_config(extra_data)

    # we don't want to stop on a failed case (unless it's a special scenario we can't handle);
    # this list records any case that fails during execution and is used to
    # report the failures before the test function exits
    failed_cases = []

    for ut_config in case_config:
        Utility.console_log("Running unit test for config: " + ut_config, "O")
        dut = env.get_dut("unit-test-app", app_path=ut_config)
        if len(case_config[ut_config]) > 0:
            replace_app_bin(dut, "unit-test-app", case_config[ut_config][0].get('app_bin'))
        dut.start_app()

        for one_case in case_config[ut_config]:
            reset_dut(dut)
            exception_reset_list = []

            for test_stage in range(one_case["child case num"]):
                # select multi stage test case name
                dut.write("\"{}\"".format(one_case["name"]))
                dut.expect("Running " + one_case["name"] + "...")
                # select test function for current stage
                dut.write(str(test_stage + 1))

                # we want to set this flag in callbacks (inner functions)
                # use list here so we can use append to set this flag
                stage_finish = list()

                def last_stage():
                    return test_stage == one_case["child case num"] - 1

                def check_reset():
                    if one_case["reset"]:
                        assert exception_reset_list  # reboot but no exception/reset logged. should never happen
                        result = False
                        if len(one_case["reset"]) == len(exception_reset_list):
                            for i, exception in enumerate(exception_reset_list):
                                if one_case["reset"][i] not in exception:
                                    break
                            else:
                                result = True
                        if not result:
                            Utility.console_log("""Reset Check Failed: \r\n\tExpected: {}\r\n\tGet: {}"""
                                                .format(one_case["reset"], exception_reset_list),
                                                color="orange")
                    else:
                        # we allow omit reset in multi stage cases
                        result = True
                    return result

                # expect callbacks
                def one_case_finish(result):
                    """ one test finished, let expect loop break and log result """
                    # handle test finish
                    result = result and check_reset()
                    if result:
                        Utility.console_log("Success: " + one_case["name"], color="green")
                    else:
                        failed_cases.append(one_case["name"])
                        Utility.console_log("Failed: " + one_case["name"], color="red")
                    stage_finish.append("break")

                def handle_exception_reset(data):
                    """
                    just append data to exception list.
                    exception list will be checked in ``handle_reset_finish``, once reset finished.
                    """
                    exception_reset_list.append(data[0])

                def handle_test_finish(data):
                    """ test finished without reset """
                    # in this scenario reset should not happen
                    if int(data[1]):
                        # case ignored
                        Utility.console_log("Ignored: " + one_case["name"], color="orange")
                    # only passed in last stage will be regarded as real pass
                    if last_stage():
                        one_case_finish(not int(data[0]))
                    else:
                        Utility.console_log("test finished before enter last stage", color="orange")
                        one_case_finish(False)

                def handle_next_stage(data):
                    """ reboot finished. we goto next stage """
                    if last_stage():
                        # already last stage, should never goto next stage
                        Utility.console_log("didn't finish at last stage", color="orange")
                        one_case_finish(False)
                    else:
                        stage_finish.append("continue")

                while not stage_finish:
                    try:
                        dut.expect_any((RESET_PATTERN, handle_exception_reset),
                                       (EXCEPTION_PATTERN, handle_exception_reset),
                                       (ABORT_PATTERN, handle_exception_reset),
                                       (FINISH_PATTERN, handle_test_finish),
                                       (UT_APP_BOOT_UP_DONE, handle_next_stage),
                                       timeout=one_case["timeout"])
                    except ExpectTimeout:
                        Utility.console_log("Timeout in expect", color="orange")
                        one_case_finish(False)
                        break
                if stage_finish[0] == "break":
                    # test breaks on current stage
                    break

    # raise exception if any case fails
    if failed_cases:
        Utility.console_log("Failed Cases:", color="red")
        for _case_name in failed_cases:
            Utility.console_log("\t" + _case_name, color="red")
        raise AssertionError("Unit Test Failed")
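Both reset checks (`handle_reset_finish` above and `check_reset` here) lean on Python's `for ... else`: the `else` branch runs only when the loop finishes without `break`, i.e. when every expected reset reason matched in order. In isolation:

expected = ["SW_CPU_RESET", "POWERON_RESET"]          # one_case["reset"]
logged = ["rst: SW_CPU_RESET", "rst: POWERON_RESET"]  # exception_reset_list

result = False
if len(expected) == len(logged):
    for want, got in zip(expected, logged):
        if want not in got:
            break            # mismatch: the else clause is skipped
    else:
        result = True        # runs only when the loop never hit break
print(result)                # True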
Example #56
                       local_vector_size, 5, 5, 4)

    elif local_vec_generator == "bert":
        pass  #TODO train model here instead of externally
        # generate_local_bert_embeddings()

if local_vec_generator == "w2v":
    local_model = models.Word2Vec.load(w2v_file)  #CBOW
    local_tokenizer = None
elif local_vec_generator == "bert":
    local_model = DistilBertModel(
        DistilBertConfig()).from_pretrained(local_bert_model_location)
    local_tokenizer = DistilBertTokenizer.from_pretrained(
        local_bert_model_location)

util = Utility.Utility()
preprocessor = Preprocess.Preprocess()

data_dir = os.path.abspath(
    f"data/multichannel_{global_vec_generator}_global_{local_vec_generator}_local/"
)

if global_vec_generator == "glove":
    global_dim = 50
else:
    global_dim = 768

if not os.path.exists(data_dir):

    with open(dataset_file, "r") as f:
        vocab = [i.rstrip('\n') for i in f.readlines()]
Example #57
    def getFeedsByText(self,
                       api=None,
                       f1=None,
                       isLive=True,
                       annotation=None,
                       queryText=u'a',
                       textLang=None,
                       isTrain=False,
                       locationArea=None):

        if api is None:
            api = self.getAppObject()

        iteratorRunCount = 0
        isDuplicateList = []
        tweetsRecorded = 0
        reTryCount = 0
        MAX_TWEET = 20
        MAX_TRIES = 10
        queryParam = {}
        while True:
            try:

                # TODO: think of better ways to handle this
                if (
                        iteratorRunCount >= 10
                ):  # hack: caps the number of search iterations before giving up
                    print(
                        "\n ASSUMPTION: there are no tweets as of now. Let's go back! \n\n"
                    )
                    print(u"\n\n\n")
                    return
                else:
                    # try some iteration with original search
                    pass

                time.sleep(
                    int(3600 / 100) + 4
                )  # pause 40 seconds between calls; Twitter allows 100 API calls per hour per account; source: https://blog.twitter.com/2008/what-does-rate-limit-exceeded-mean-updated

                if (textLang is not None) and (' lang:' not in queryText):
                    # guard keeps the retry loop from appending ' lang:' repeatedly
                    queryText = queryText + ' lang:' + textLang

                queryParam['rpp'] = 100
                if ((isLive == True) or (queryText is None)):
                    if queryText is not None:
                        queryParam['track'] = queryText
                    if (locationArea is None) and (queryText is not None):
                        # live tweets without location filter
                        iterator = api.request('statuses/filter',
                                               queryParam).get_iterator()
                    elif (locationArea is not None) and (queryText
                                                         is not None):
                        # live tweets with location filter
                        queryParam['locations'] = GeoLocationModule.getGeoArea(
                            area=locationArea)
                        iterator = api.request('statuses/filter',
                                               queryParam).get_iterator()
                    elif (locationArea is not None) and (queryText is None):
                        # delegate to the location-only stream; iterator is
                        # never set on this path, so return instead of falling
                        # through to the read loop below
                        self.liveFeedsByLocation(api=api,
                                                 locationArea=locationArea)
                        return
                    else:
                        print(
                            "ERROR: locationArea and queryText cannot be None together"
                        )
                        exit(-1)

                else:  # isLive==False
                    queryParam['q'] = queryText
                    if locationArea is None:
                        # search tweets without location filter
                        iterator = api.request('search/tweets',
                                               queryParam).get_iterator()
                    else:
                        # search tweets with location filter
                        queryParam['locations'] = GeoLocationModule.getGeoArea(
                            area=locationArea)
                        iterator = api.request('search/tweets',
                                               queryParam).get_iterator()

                iteratorRunCount += 1

                for item in iterator:
                    if (('text' in item)
                            and (item[u'id'] not in isDuplicateList)
                            and (item[u'retweeted'] == False)):

                        rawTextClean1 = item[u'text'].encode('utf-8')
                        rawTextClean2 = rawTextClean1.strip()
                        rawTextClean3 = rawTextClean2.replace(
                            "#", " ")  # drop the '#' marker, keep the tag word
                        rawTextClean4 = re.sub(
                            r'https?:\/\/.*[\r\n]*',
                            '',
                            rawTextClean3,
                            flags=re.MULTILINE)  # remove urls (and anything after them on the same line)

                        if (25 < len(rawTextClean4)) and (
                                len(item[u'text']) <
                                140):  # take tweets with sufficient text
                            isDuplicateList.append(item[u'id'])
                            tweetsRecorded += 1

                            rawEnText = TranslationModule.getEnglish(
                                rawTextClean4)
                            fineEnText = rawEnText.replace(",", " ").replace(
                                ";", " ")
                            print(
                                str(tweetsRecorded) + ":\t" + item[u'lang'] +
                                ",\t\t" + annotation.lower() + "\t\t:" +
                                queryText + "\t\t:" + str(len(fineEnText)) +
                                "\n\t:" + fineEnText)

                            emoVector = self.getEmoTaggerObject(
                            ).consolodateResult(fineEnText)
                            listRes = []
                            keyRes = sorted(emoVector)
                            for key in keyRes:
                                listRes.append(emoVector[key])
                            print(listRes, keyRes)

                            # round-trip the emo-vector through a string to
                            # coerce its entries to floats
                            listStr1 = str(listRes).replace(",", " ")
                            listStr2 = listStr1[1:-1]
                            listStr3 = listStr2.split()
                            listVector = [
                                float(i) for i in listStr3
                                if Utility.RepresentsNum(i)
                            ]

                            emoLabel = annotation
                            if len(listVector) != 0:
                                assert (len(listVector) == 8
                                        )  # emo-vector length should be 8;
                                if True:  # Training Only
                                    emoTypesCount = 0
                                    for i in range(0, 8, 1):
                                        if (listVector[i] > 0.0):
                                            emoTypesCount += 1

                                    if (emoTypesCount == 0):
                                        emoLabel = "neutral"
                                        print(">> No Emotion \n\n\n")
                                        continue
                                    elif (emoTypesCount >= 5):
                                        emoLabel = "mixed"
                                        print(">> Mixed Emotion \n\n\n")
                                        continue
                                    else:
                                        emoLabel = annotation

                            if isTrain == True:
                                f1.write(
                                    unicode(item[u'id_str']) + "," +
                                    unicode(item[u'created_at']) + "," +
                                    unicode(item[u'lang']) + "," +
                                    unicode(emoLabel).lower() +
                                    "," + unicode(fineEnText).replace(
                                        "\n", " ").replace("\r", " ") + "," +
                                    "\n")
                                f1.flush()
                                os.fsync(f1.fileno())
                            else:
                                Supervised.getPrediction(npVector=numpy.array(
                                    [listRes]),
                                                         model='NBC')
                                Supervised.getPrediction(npVector=numpy.array(
                                    [listRes]),
                                                         model='SVC')

                            if (tweetsRecorded >= MAX_TWEET) or (reTryCount >=
                                                                 MAX_TRIES):
                                print("\n ReTry Count: " + str(reTryCount) +
                                      "\n\n")
                                print(u"\n\n\n")
                                return

                            print(u"\n\n\n")

                    elif 'disconnect' in item:
                        event = item['disconnect']
                        reTryCount += 1

                        if event['code'] in [2, 5, 6, 7]:
                            # something may or may NOT need to be fixed before re-connecting
                            raise Exception(event['reason'])
                        else:
                            # temporary interruption, re-try request
                            break

                    elif (iteratorRunCount > 0) and (
                            tweetsRecorded < MAX_TWEET
                    ):  # Condition when no more unique tweets are found, go back
                        # TODO: think of better ways to handle this
                        if (queryText[0] == '#'):
                            return  # temporary return
                            # unreachable while the early return above is in place:
                            queryText = queryText[1:]
                            break
                        else:
                            print("\n No more tweets as of now \n\n")
                            print(u"\n\n\n")
                            return

                    else:
                        pass

            except TwitterRequestError as e:
                if e.status_code < 500:
                    print("\n\n" + "MJAGLAN EXCEPTION:\n" + str(e) + "\n\n")
                else:
                    # temporary interruption, re-try request
                    pass

            except TwitterConnectionError:
                # temporary interruption, re-try request
                pass
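The cleaning chain above (trim whitespace, drop '#' characters, strip URLs) is easier to see as one small helper. A standalone sketch with a made-up helper name, reproducing the same regex and its quirk of also removing everything after a URL on the same line:

import re

def clean_tweet(text):
    text = text.strip()
    text = text.replace("#", " ")  # keep the tag word, drop the '#'
    # same regex as above: removes the URL plus the rest of that line
    text = re.sub(r'https?:\/\/.*[\r\n]*', '', text, flags=re.MULTILINE)
    return text

print(clean_tweet("Check this #demo https://example.com now"))
# -> 'Check this  demo '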
Example #58
0
def issues_time_window(db, name, month, hr):
    try:
        count = len(db)
        if count > 0:
            entry = []
            for e in db:
                if e.get('type') == 'IssueOpened':
                    entry.append(e.get('created_at'))

            sorted_entry = sorted(entry)

            len_sorted = len(sorted_entry)
            first_day = datetime.strptime(sorted_entry[0], '%Y-%m-%dT%H:%M:%SZ')
            end_date = first_day + timedelta(days=month*30)

            first_months_issues = []
            all_months_issues = []

            for i in range(0, len_sorted):
                all_months_issues.append(sorted_entry[i])
                if datetime.strptime(sorted_entry[i], '%Y-%m-%dT%H:%M:%SZ') < end_date:
                    first_months_issues.append(sorted_entry[i])

            first_months_days_between_issues = []
            for dt in range(0, len(first_months_issues) - 1):
                first_months_days_between_issues.append(Utility.time_diff(first_months_issues[dt], first_months_issues[dt + 1]))

            all_months_days_between_issues = []
            for dt in range(0, len(all_months_issues) - 1):
                all_months_days_between_issues.append(Utility.time_diff(all_months_issues[dt], all_months_issues[dt + 1]))

            dist = distribution(all_months_days_between_issues)
            rangee = 2 * dist['std']  # width of the [mean - std, mean + std] interval
            issues_count_in_range = issues_in_range(all_months_days_between_issues, hr)

            return {
                'initial_list_on_the_first_months': first_months_days_between_issues,
                'final_list_length': len(all_months_days_between_issues) - len(first_months_days_between_issues),
                'repo_name' : name,
                'issues_count': dist['issue_count'],
                'old_mean': dist['mean'],
                'old_std': dist['std'],
                'old_dense_issues': dist['dense'],
                'old_normal_issues': dist['normal'],
                'old_dispersed_issues': dist['dispersed'],
                'old_dense_issues_prc': dist['dense_prc'],
                'old_normal_issues_prc': dist['normal_prc'],
                'old_dispersed_issues_prc': dist['dispersed_prc'],
                'old_range': rangee,
                'old_issues_dens_in_mean_std_range': dist['normal'] / float(rangee),
                'old_issues_around_mean_1hr': issues_count_in_range[0],
                'old_issues_around_mean_3hr': issues_count_in_range[1],
                'old_issues_around_mean_6hr': issues_count_in_range[2],
                'old_issues_around_mean_12hr': issues_count_in_range[3],
                'old_issues_around_mean_24hr': issues_count_in_range[4]
            }

        else:
            print name + ' is not a completely processed repository'

    except Exception as er:
        print str(er)
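The core of issues_time_window is the inter-arrival computation: sort the ISO-8601 timestamps, then take pairwise differences. A minimal sketch with made-up timestamps (assuming, as the code above implies, that Utility.time_diff returns the gap in days):

from datetime import datetime

stamps = ['2020-01-10T00:00:00Z', '2020-01-01T00:00:00Z', '2020-01-03T12:00:00Z']
parsed = sorted(datetime.strptime(s, '%Y-%m-%dT%H:%M:%SZ') for s in stamps)
gaps_in_days = [(b - a).total_seconds() / 86400.0
                for a, b in zip(parsed, parsed[1:])]
print(gaps_in_days)  # [2.5, 6.5]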
Example #59
0
import Utility as ut
a = ut.flipcoin()
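Utility.flipcoin itself is not shown in any of these snippets. A minimal stand-in, purely an assumption about its behavior, might be:

import random

def flipcoin():
    # assumed behavior: fair coin, returns 'H' or 'T'
    return random.choice(['H', 'T'])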
Example #60
0
    def close(self):
        super(IDFDUT, self).close()
        if not self.allow_dut_exception and self.get_exceptions():
            Utility.console_log("DUT exception detected on {}".format(self),
                                color="red")
            raise IDFDUTException()
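The close() override above follows a common pattern: always run the parent cleanup first, then raise if a failure condition was recorded along the way. A generic sketch of that pattern with hypothetical class and method names:

class DUTError(Exception):
    pass

class BaseDUT(object):
    def close(self):
        pass  # release ports, stop reader threads, etc.

class StrictDUT(BaseDUT):
    def __init__(self, allow_exception=False):
        self.allow_exception = allow_exception
        self._exceptions = []  # populated while the DUT runs

    def get_exceptions(self):
        return self._exceptions

    def close(self):
        # always run the parent cleanup before deciding whether to raise
        super(StrictDUT, self).close()
        if not self.allow_exception and self.get_exceptions():
            raise DUTError("DUT exception detected")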