def get_language_snippets(self, path, name=None):
    """Return the snippets registered for the language of *path*.

    Args:
        path: tree path identifying the row whose language to look up.
        name: optional language name; when None it is derived from
            *path* via self.get_language().

    Returns:
        The snippet nodes the Library holds for that language.
    """
    # Bug fix: the original unconditionally overwrote the caller's
    # `name` argument, making the parameter dead; honor it when given.
    if name is None:
        name = self.get_language(path)
    library = Library()
    return library.get_snippets(name)
def noExtraOptions(options, *arg): options = vars(options) for argOption in arg: Library.removeFromDict(options, argOption) for optionValue in options.values(): if not (optionValue == False): print "Bad option combination" sys.exit()
def singleLogger(elementName, level=None, filename=None):
    """Return a configured logger named *elementName*.

    When *filename* is None the logger writes to the rotating core CSV
    log plus the console; otherwise it writes to logs/<filename>.
    Handlers are attached only once per logger name.

    Args:
        elementName: logger name, embedded in every record.
        level: explicit logging level; defaults to the configured
            "coreloglevel" (info unless set to debug).
        filename: optional log file name under HOMEPATH/logs.

    Returns:
        logging.Logger: the configured logger.
    """
    # file writing handler
    producer = Producer()
    HOMEPATH = Library.getHomepath()
    global emulationEndLogger
    emulationEndLogger = Library.loggerSet("Logger")

    def logLevelGet():
        # Map the configured core log level onto logging constants;
        # anything unrecognised falls back to INFO.  (The original
        # if/if/else chain produced the same results — collapsed here.)
        LogLevel = Library.readLogLevel("coreloglevel")
        if LogLevel == "debug":
            return logging.DEBUG
        return logging.INFO

    if level is None:
        level = logLevelGet()
    fileLogger = logging.getLogger(elementName)
    fileLogger.setLevel(level)
    # we do not add additional handlers if they are there
    if not len(fileLogger.handlers):
        if filename is None:
            # setting log rotation for 10 files each up to 10000000 bytes (10MB)
            fileHandler = handlers.RotatingFileHandler(HOMEPATH + "/logs/COCOMAlogfile.csv", 'a', 10000000, 10)
            fileLoggerFormatter = logging.Formatter('%(asctime)s;%(name)s;%(levelname)s;%(message)s', datefmt='%m/%d/%Y %H:%M:%S')
            fileHandler.setFormatter(fileLoggerFormatter)
            fileLogger.addHandler(fileHandler)
            # cli writing handler
            cliLoggerFormatter = logging.Formatter('%(asctime)s - [%(name)s] - %(levelname)s : %(message)s', datefmt='%m/%d/%Y %H:%M:%S')
            cliHandler = logging.StreamHandler()
            cliHandler.setFormatter(cliLoggerFormatter)
            fileLogger.addHandler(cliHandler)
        else:
            fileHandler = logging.FileHandler(HOMEPATH + "/logs/" + str(filename))
            fileLoggerFormatter = logging.Formatter('%(asctime)s;%(name)s;%(levelname)s;%(message)s', datefmt='%m/%d/%Y %H:%M:%S')
            fileHandler.setFormatter(fileLoggerFormatter)
            fileLogger.addHandler(fileHandler)
    return fileLogger
def unref_languages(self):
    """Save and release every fully loaded language in the model."""
    library = Library()
    piter = self.model.get_iter_first()
    while piter:
        if self.is_filled(piter):
            lang = self.get_language(self.model.get_path(piter))
            library.save(lang)
            library.unref(lang)
        piter = self.model.iter_next(piter)
def get_tests():
    ''' List all available tests in the "/tests" folder '''
    ET.register_namespace("test", "http://127.0.0.1/cocoma")
    response.set_header('Content-Type', 'application/vnd.bonfire+xml')
    response.set_header('Accept', '*/*')
    response.set_header('Allow', 'GET, HEAD, POST')
    available = Library.listTests("all")
    # Assemble the XML collection returned to the client.
    root = ET.Element('collection', {'xmlns': 'http://127.0.0.1/cocoma', 'href': '/tests'})
    items = ET.SubElement(root, 'items', {'offset': '0', 'total': str(len(available))})
    for test_name in available:
        ET.SubElement(items, 'test', {'href': '/tests/' + str(test_name), 'name': str(test_name)})
    ET.SubElement(root, 'link', {'rel': 'parent', 'href': '/', 'type': 'application/vnd.bonfire+xml'})
    return prettify(root)
def get_emulators():
    ''' Display list of emulators '''
    ET.register_namespace("test", "http://127.0.0.1/cocoma")
    response.set_header('Content-Type', 'application/vnd.bonfire+xml')
    response.set_header('Accept', '*/*')
    response.set_header('Allow', 'GET, HEAD')
    names = Library.listEmulators("all")
    # Assemble the XML collection returned to the caller.
    collection = ET.Element('collection', {'xmlns': 'http://127.0.0.1/cocoma', 'href': '/emulators'})
    item_list = ET.SubElement(collection, 'items', {'offset': '0', 'total': str(len(names))})
    for emu_name in names:
        ET.SubElement(item_list, 'emulator', {'href': '/emulators/' + str(emu_name), 'name': str(emu_name)})
    ET.SubElement(collection, 'link', {'rel': 'parent', 'href': '/', 'type': 'application/vnd.bonfire+xml'})
    return prettify(collection)
def show_all_results():
    ''' Show summary of emulations results '''
    ET.register_namespace("test", "http://127.0.0.1/cocoma")
    response.set_header('Content-Type', 'application/vnd.bonfire+xml')
    response.set_header('Accept', '*/*')
    response.set_header('Allow', 'GET, HEAD')
    emulations = Library.getEmulationList("all")
    # root element of the returned XML
    root = ET.Element('collection', {'xmlns': 'http://127.0.0.1/cocoma', 'href': '/results'})
    items = ET.SubElement(root, 'items', {'offset': '0', 'total': str(len(emulations))})
    for emu in emulations:
        failed = emu["failedRunsInfo"]
        ET.SubElement(items, 'results', {'href': '/results/' + str(emu["Name"]), 'failedRuns': str(len(failed)), 'name': str(emu["Name"]), 'state': str(emu["State"])})
    ET.SubElement(root, 'link', {'rel': 'parent', 'href': '/', 'type': 'application/vnd.bonfire+xml'})
    return prettify(root)
def __init__(self):
    """Set up the music app: library in the current directory, a player, and the help text."""
    # The working directory doubles as the music library location.
    self.dir = os.getcwd()
    self.library = Library(self.dir)
    #self.musicPlayer = threading.Thread(target=MusicPlayer.__init__, args = [self.library])
    # Player is started immediately; presumably runs on its own thread
    # (start() suggests a Thread subclass) — TODO confirm.
    self.musicPlayer = MusicPlayer(self.library)
    self.musicPlayer.start()
    # Help text shown to the user: one "command - description" entry per line.
    self.commandList = "download (song) - downloads a song to your library \nlist (songs/playlists) [start] - Lists your songs or playlists, optionally starting with certain character(s) \ncreate playlist (name) - creates a playlist with given name \nadd (song) :: (playlist) - adds given song in your library to given playlist \nshuffle [playlist] - shuffles your main library or a specific playlist \nplay (song/playlist) - plays the current music if no argument give, or plays a song or a playlist \ndelete (song/playlist) - removes a song or a playlist \npause - pauses the music \nrewind - rewinds the music \nskip - skips to the next song\ninfo - gives info about Tünz \nexit - exits Tünz"
def loadMon(duration,interval,emulationID,emulationName,emuStartTime): HOMEPATH= Library.getHomepath() emulationName=str(emulationName) interval=int(interval) ''' starting cpu monitoring in the loop ''' iterationsNo=int(duration)/int(interval) try: f = open(HOMEPATH+"/logs/"+str(emulationID)+"-"+str(emulationName)+"-res"+"_"+str(emuStartTime)+".csv", 'a') f.write(emulationName+";\nCountdown;Time;CPU(%);MEM(%);IOread(bytes);IOwrite(bytes);NET(bytes_sent)\n") #start time initTime=time.time() while iterationsNo !=0: CPU=str(psutil.cpu_percent(interval, False)) #MEM=str(psutil.virtual_memory().percent) MEM=str(psutil.avail_virtmem()) IOr=str(psutil.disk_io_counters().read_time) IOw=str(psutil.disk_io_counters().write_time) NET=str(psutil.network_io_counters(False).bytes_sent) #print (emulationName+";\nTime;CPU(%);MEM(%);IOread(bytes);IOwrite(bytes);NET(bytes_sent)\n"+str(time.time())+";"+CPU+";"+MEM+";"+IOr+";"+IOw+";"+NET) probeTime=time.time()-initTime timeStamp=dt.now() f.write(str(int(probeTime))+";"+str(timeStamp.strftime("%Y-%m-%d %H:%M:%S.%f"))+";"+CPU+";"+MEM+";"+IOr+";"+IOw+";"+NET+"\n") iterationsNo=iterationsNo-1 except Exception,e: print "Unable to create log file\nError: ",e
def fuzzLoad(emulationID, distributionID, runNo, min, fuzzRange, serverip, serverport, protocol, salt, timedelay):
    """Run one backfuzz fuzzing job against serverip:serverport.

    Launches backfuzz as a subprocess, records the outcome in the DB via
    dbWriter(), and on success asks DistributionManager to schedule the
    next run.

    Returns:
        True on success, False when zombieBuster reports a dead job.
        NOTE(review): the outer except returns a *tuple*
        ("run_backfuzzer job exception: ", e) — probably meant to be a
        formatted string; confirm with callers.
    """
    runBackfuzzPidNo = 0
    # A zero delay would hammer the target; default to 0.8 s.
    if (timedelay == 0 or timedelay == "0"):
        timedelay = 0.8
    try:
        # Echo the command line for the operator.
        # NOTE(review): "-s " here carries a trailing space, unlike the
        # argv below — cosmetic only.
        print "python", BACKFUZZ_PATH, "-h", str(serverip), "-p", str(
            serverport), "-min", str(min), "-max", str(
                int(min) + int(fuzzRange)), "-s ", str(salt), "-pl", str(
                    protocol).upper(), "-t", str(timedelay)
        # NOTE(review): the literal "&&" argv element and the
        # cwd=BACKFUZZ_PATH[:-11] slice (strips a fixed-length suffix of
        # the path?) look fragile — confirm against the backfuzz layout.
        runBackfuzz = subprocess.Popen(
            [
                "python", BACKFUZZ_PATH, "-h",
                str(serverip), "-p",
                str(serverport), "-min",
                str(min), "-max",
                str(int(min) + int(fuzzRange)), "-s",
                str(salt), "-pl",
                str(protocol).upper(), "-t",
                str(timedelay), "&&"
            ],
            cwd=BACKFUZZ_PATH[:-11],
            stdin=subprocess.PIPE)  #,stdout=subprocess.PIPE)
        # Nudge backfuzz past its interactive prompt.
        runBackfuzz.stdin.flush()
        runBackfuzz.stdin.write("\r\n")
        runBackfuzzPidNo = runBackfuzz.pid
        if zombieBuster(runBackfuzzPidNo, "backfuzz"):
            print "Job failed, sending wait()."
            runBackfuzz.wait()
            message = "Error in the emulator execution"
            executed = "False"
            dbWriter(distributionID, runNo, message, executed)
            return False
        else:
            print "Success! waiting on process to finish running"
            runBackfuzz.wait()
            print "Process finished, writing into DB"
            message = "Success"
            executed = "True"
            dbWriter(distributionID, runNo, message, executed)
            print "Process stopped, trying to schedule next job"
            try:
                import Library, DistributionManager
                daemon = Library.getDaemon()
                newEmulation = daemon.getEmuObject(emulationID)
                DistributionManager.createDistributionRuns(newEmulation)
            except Exception, e:
                print "Emulation object error: ", e
            return True
    except Exception, e:
        return "run_backfuzzer job exception: ", e
def GetVCXProj(path: str):
    """Parse a .vcxproj file and register its default XML namespace.

    Args:
        path: filesystem path of the project file.

    Returns:
        tuple: (ElementTree, xmlns) where xmlns is the detected default
        namespace or None when the project declares none.
    """
    tree = ET.parse(path)
    root = tree.getroot()
    xmlns = lb.XML_GetNameSpace(root)
    # Idiom fix: identity comparison against None instead of `!=`.
    if xmlns is not None:
        ET.register_namespace('', xmlns)
    return tree, xmlns
def on_entry_accelerator_key_press(self, entry, event):
    """Key-press handler for the snippet accelerator entry.

    Escape restores the displayed accelerator; Delete/BackSpace clears
    the snippet's accelerator; any other valid accelerator combination
    is stored on the snippet.  Returning True stops event propagation.
    """
    # NOTE(review): source_view is fetched but never used here.
    source_view = self['source_view_snippet']
    if event.keyval == gdk.keyval_from_name('Escape'):
        # Reset
        entry.set_text(self.snippet.accelerator_display())
        self.tree_view.grab_focus()
        return True
    elif event.keyval == gdk.keyval_from_name('Delete') or \
            event.keyval == gdk.keyval_from_name('BackSpace'):
        # Remove the accelerator
        entry.set_text('')
        self.snippet['accelerator'] = ''
        self.tree_view.grab_focus()
        self.snippet_changed()
        return True
    elif Library().valid_accelerator(event.keyval, event.state):
        # New accelerator
        self.set_accelerator(event.keyval, \
                event.state & gtk.accelerator_get_default_mod_mask())
        entry.set_text(self.snippet.accelerator_display())
        self.snippet_changed()
        self.tree_view.grab_focus()
        # NOTE(review): this branch falls through and returns None
        # (falsy), unlike the other handled branches — confirm whether
        # `return True` was intended here.
    else:
        return True
def process_ATOM(self, rec):
    """Accumulate one ATOM/HETATM record into self.atm_map.

    A new record signals a new atom, so any previously accumulated atom
    is flushed via self.load_atom() first.  Field names follow PDB
    column conventions.
    """
    ## load current atom since this record indicates a new atom
    if self.atm_map:
        self.load_atom(self.atm_map)
        self.atm_map = {}
    ## optimization
    atm_map = self.atm_map
    ## always derive element from atom name for PDB files -- they are
    ## too messed up to use the element column
    try:
        name = rec["name"]
    except KeyError:
        atm_map["name"] = ""
        atm_map["element"] = ""
    else:
        atm_map["name"] = name.strip()
        res_name = rec.get("resName", "")
        gelement = Library.library_guess_element_from_name(name, res_name)
        if gelement != None:
            atm_map["element"] = gelement
    ## additional atom information
    if rec.has_key("serial"):
        atm_map["serial"] = rec["serial"]
    if rec.has_key("altLoc"):
        atm_map["alt_loc"] = rec["altLoc"]
    if rec.has_key("resName"):
        atm_map["res_name"] = rec["resName"]
    if rec.has_key("chainID"):
        atm_map["chain_id"] = rec["chainID"]
    ## construct fragment_id (resSeq plus insertion code when present)
    if rec.has_key("resSeq"):
        if rec.has_key("iCode"):
            atm_map["fragment_id"] = "%d%s" % (rec["resSeq"], rec["iCode"])
        else:
            atm_map["fragment_id"] = "%d" % (rec["resSeq"])
    ## add the model number for the atom
    if self.model_num != None:
        atm_map["model_id"] = self.model_num
    ## position
    if rec.has_key("x"):
        atm_map["x"] = rec["x"]
    if rec.has_key("y"):
        atm_map["y"] = rec["y"]
    if rec.has_key("z"):
        atm_map["z"] = rec["z"]
    if rec.has_key("occupancy"):
        atm_map["occupancy"] = rec["occupancy"]
    if rec.has_key("tempFactor"):
        atm_map["temp_factor"] = rec["tempFactor"]
def disease_neighbors(G, centroid):
    """Collect symptom/disease ('ST'/'DS') neighbours of *centroid*.

    Returns a tuple of:
      path_list: neighbour paths containing more than one node,
      hop_distance: node -> hop count (centroid itself not counted),
      node_tag: node -> its 'tag' attribute,
      disease_dis: node -> distance, sorted ascending by distance.
    """
    neighbors, neighbors_dis = ct.centroid_neighbors(G, centroid)
    disease_dis = dict()
    path_list = []
    hop_distance = dict()
    node_tag = dict()
    for p in neighbors:
        temp_path = []
        hop = 0
        try:
            if G.node[p]['tag'] == 'ST' or G.node[p]['tag'] == 'DS':
                for n in neighbors[p]:
                    hop += 1
                    temp_path.append(n)
                    if n not in node_tag:
                        node_tag[n] = G.node[n]['tag']
                    # NOTE(review): this repeats the outer test on p, so
                    # it is always true here — possibly meant to test n.
                    if G.node[p]['tag'] == 'ST' or G.node[p]['tag'] == 'DS':
                        disease_dis[n] = neighbors_dis[n]
                        if n not in hop_distance:
                            hop_distance[
                                n] = hop - 1  # not count centroid. path ['centroid', 'neighbors']
            if len(temp_path) > 1 and temp_path not in path_list:
                path_list.append(temp_path)
        except:
            # bare except: nodes lacking a 'tag' attribute are skipped
            # silently — NOTE(review): narrow to KeyError if possible.
            pass
    return (path_list, hop_distance, node_tag,
            dict(sorted(disease_dis.items(), key=operator.itemgetter(1))))
def clustered_hitogram(df_var, save=False): """ Автор: Анатолий Лернер Цель: Формурует и илюстрирует отчёт гистограмма Вход: dataframe, флаг: нужно ли спасти Выход: Нет (новое окно и спасённый файл) """ # находим один качественный столбец category = df_var.select_dtypes(include=np.object) column_name = category.columns[0] # набор уникальнтых элемнтов в нём category = set(list(category[column_name])) data_list = [] x_name = "" for z_var in category: # временный DataFrame содержащий все строки с данным качественным # атрибутом tmp = df_var.loc[df_var[column_name] == z_var] # выкидываем качественный столбец. После этого ровно один числовой # столбец должен остатся tmp = tmp.drop(column_name, axis=1) # формируем списки с информацией x_name = tmp.columns[0] data_list.append(list(tmp.iloc[:, 0])) # print(data_list) # гинерируем список уникальных цветов colors = lib.get_color_list(len(category)) # устанавлуваем график plt.hist(data_list, G_HIST_POTS, density=1, histtype='bar', color=colors, label=list(category), zorder=3) plt.xlabel(TAG_DICT[x_name], labelpad=G_FONT_PAD, fontsize=G_FONT_SIZE, fontstyle=G_FONT_STYLE) plt.ylabel(G_HIST_YLABEL, labelpad=G_FONT_PAD, fontsize=G_FONT_SIZE, fontstyle=G_FONT_STYLE) plt.legend(framealpha=1, bbox_to_anchor=(G_XA_LABEL, G_YA_LABEL), loc=G_POS_LABEL) plt.grid(True, linestyle=G_GRID_LINE, axis=G_GRID_AXIS, zorder=0) plt.title(ANALYSIS_NAMES[3] + ": " + TAG_DICT[column_name], pad=G_TITLE_PAD, fontsize=G_TITLE_SIZE, fontstyle=G_FONT_STYLE) plt.tight_layout() if save: filename = ANALYSIS_NAMES[3] + " [" + \ TAG_DICT[column_name] + "][" + TAG_DICT[x_name] + "]" plt.savefig('./Graphics/' + filename + G_EXP_FORMAT) else: plt.show()
def filter_entry_format_test(results):
    """
    Author: Vitaly Pavlenko
    Purpose: checks whether the values entered for the attributes match
             the type expected for each attribute
    Input: dict mapping attribute names to the values the user entered
    Output: tuple of a flag (whether the input is legal) and an error
            message string
    """
    # expected numeric format per attribute prefix
    formats = {
        'Cost': 'float',
        'Avai': 'int',
        'Volt': 'float',
        'Bits': 'int',
        'Inps': 'int',
        'Rati': 'float'
    }
    for x_var in results.keys():
        lst, flg = lib.words_to_list(results[x_var])
        # check that the '+' separator is used correctly
        if not flg:
            return (False, MSG_PLUS_ERROR_P1 + TAG_DICT[x_var] + MSG_PLUS_ERROR_P2)
        # check that certain fields contain numbers in the right format
        if x_var in formats.keys():
            if formats[x_var] == 'int':
                try:
                    lst = list(map(int, lst))
                except BaseException:
                    return (False, TAG_DICT[x_var] + MSG_INT_ERROR)
            if formats[x_var] == 'float':
                try:
                    lst = list(map(float, lst))
                except BaseException:
                    return (False, TAG_DICT[x_var] + MSG_FLOAT_ERROR)
    return (True, "")
def __init__(self, num_of_players=4, passed=True, winner_starts=True): """Initializes board. num_of_players: number of players that will play the game passed: whether you can pass cards in the game """ # Checks if enough players are in the game if num_of_players < 2: # Raises error otherwise raise AttributeError("Can't Play with less than 2 people") # Calculates and keeps the number of players that are playing and the number of decks it needs to shuffle self.Num_of_players = num_of_players self.Num_of_decks = math.floor(num_of_players/5) # Creates Deck Object self.Current_Library = Library.Library(self.Num_of_decks) # Creates list of players self.Players = [Player.Player() for x in range(self.Num_of_players)] # Sets game type self.Passed = passed # Sets if the winner starts next game self.Winner_starts = winner_starts # Initializes all the unknown variables self.Trump_suit = None self.Current_attacker = 0 self.Current_defender = 0 self.Attack_zone = None self.Attacking_ranks = None self.Mode = None self.Winner = None self.Has_started_defending = None
def createEmulation(emulationName, emulationType, emulationLog, emulationLogFrequency, emulationLogLevel, resourceTypeEmulation, startTimeEmu, stopTimeEmu, distroList, xmlData, MQproducerValues):
    """Announce the request on the message queue and build the Emulation.

    Returns an error string when the Emulation constructor raises;
    otherwise the visible code path ends without an explicit return.
    NOTE(review): distroList and MQproducerValues are unused in this
    visible portion of the function.
    """
    global producer
    producer = EMQproducer.Producer()
    # print "Who calls "+sys._getframe(0).f_code.co_name+": "+sys._getframe(1).f_code.co_name
    producer.init()
    #producer.sendmsg("Emulation Manager",str(emulationName)+": request received")
    msg = {
        "Action": "Emulation request received",
        "UserEmulationName": str(emulationName)
    }
    producer.sendmsg(myName, msg)
    # a "now" start request is resolved to a concrete timestamp
    if startTimeEmu.lower() == "now":
        startTimeEmu = Library.emulationNow(2)
    try:
        # here we creating emulation object
        newEmulation = Emulation.emulation(emulationName, emulationType, emulationLog, emulationLogFrequency, emulationLogLevel, resourceTypeEmulation, startTimeEmu, stopTimeEmu, xmlData)
    except Exception, e:
        return "Unable to create emulation:\n" + str(e)
def setDistrType(self, distrType):
    """Validate *distrType* against the installed distribution modules.

    Returns the name unchanged when a module matches it
    (case-insensitively); raises otherwise.
    """
    wanted = distrType.lower()
    for candidate in Library.listDistributions("all"):
        if wanted == candidate.lower():
            return distrType
    raise Exception("Distribution module '%s' does not exist. Check the name" % (distrType))
def new_snippet(self, properties=None):
    """Create a snippet for the selected language, or None when no
    language row is selected."""
    if not self.language_path:
        return None
    lang = self.get_language(self.language_path)
    return Snippet(Library().new_snippet(lang, properties))
def estimate_year_data(self, years, frequency):
    """Extend self.price with grown data for any year missing from it.

    Should be called after add_growth_data and before the optimization
    is run.

    Args:
        years (List): list of years for which analysis will occur on
        frequency (str): period frequency of the timeseries data
    """
    known_years = self.price.index.year.unique()
    # years requested but absent from the price index
    missing = {pd.Period(y) for y in years} - {pd.Period(y) for y in known_years}
    for target in missing:
        base_year = pd.Period(max(known_years))
        base_slice = self.price[self.price.index.year == base_year.year]  # use source year data
        grown = Lib.apply_growth(base_slice, self.growth, base_year, target, frequency)
        self.price = pd.concat([self.price, grown], sort=True)  # add to existing
def call_main_menu_new():
    """
    Author: Anatoly Lerner
    Purpose: handler for the 'New' entry of the 'Database' menu;
             organizes the creation of a new database
    Input: none
    Output: modified global variables
    """
    global table_core #pylint: disable=C0103
    global table_comp #pylint: disable=C0103
    global table_manf #pylint: disable=C0103
    global table_all #pylint: disable=C0103
    global current_database_path #pylint: disable=C0103
    # ask the user where the new database file should be created
    path = fbox.asksaveasfilename(
        initialfile=DEFAULT_NEW_DATABASE + DATABASE_FORMAT,
        initialdir="./Data/",
        title=SAVEAS_TITLE,
        filetypes=[(DATABASE_FORMAT_INTRO, "*" + DATABASE_FORMAT)])
    # empty path means the dialog was cancelled
    if path != "":
        current_database_path = path
        folder, name, extension = lib.split_path(path, DATABASE_FORMAT)
        # fresh empty tables, persisted to disk immediately
        table_core, table_comp, table_manf, table_all = call_new_database()
        call_save_source_database(folder + name + extension, table_core, table_comp, table_manf)
        root.title(W_WINDOW_TITLE + name + extension)
        call_main_init()
def add_growth_data(self, df, opt_years, verbose=False):
    """ Helper function: Adds rows to df where missing opt_years

    Drops data for years not in opt_years, then synthesizes data for
    requested years that are absent by applying per-column growth rates
    to the most recent year present.

    Args:
        df (DataFrame): given data
        opt_years (List): List of Period years where we need data for
        verbose (bool): currently unused in this function

    Returns:
        df (DataFrame): input data extended to cover every opt_year

    TODO: might be a good idea to move back to Library
        change this to work with OOP framework
    """
    data_year = df.index.year.unique()  # which years was data given for
    # which years is data given for that is not needed
    dont_need_year = {pd.Period(year) for year in data_year} - {pd.Period(year) for year in opt_years}
    if len(dont_need_year) > 0:
        for yr in dont_need_year:
            df_sub = df[df.index.year != yr.year]  # choose all data that is not in the unneeded year
            df = df_sub

    data_year = df.index.year.unique()
    # which years do we not have data for
    no_data_year = {pd.Period(year) for year in opt_years} - {pd.Period(year) for year in data_year}
    # if there is a year we dont have data for
    if len(no_data_year) > 0:
        for yr in no_data_year:
            source_year = pd.Period(max(data_year))  # which year to to apply growth rate to (is this the logic we want??)

            # create new dataframe for missing year
            new_index = pd.date_range(start='01/01/' + str(yr), end='01/01/' + str(yr + 1), freq=self.frequency, closed='left')
            new_data = pd.DataFrame(index=new_index)

            source_data = df[df.index.year == source_year.year]  # use source year data
            def_rate = self.growth_rates['default']

            # for each column in growth column
            for col in df.columns:
                # look for specific growth rate in params, else use default growth rate
                name = col.split(sep=' ')[0].lower()
                col_type = col.split(sep=' ')[1].lower()
                if col_type == 'load (kw)' or col_type == 'gen (kw/rated kw)':
                    # if name in self.growth_rates.keys():
                    #     rate = self.growth_rates[name]
                    # else:
                    # NOTE(review): log message lacks a space before the
                    # column name ("for" + name).
                    u_logger.info('Using default growth rate (' + str(def_rate) + ') for' + str(name))
                    rate = def_rate
                else:
                    # non-load/gen columns are carried over without growth
                    rate = 0
                new_data[col] = Lib.apply_growth(source_data[col], rate, source_year, yr, self.frequency)  # apply growth rate to column

            # add new year to original data frame
            df = pd.concat([df, new_data], sort=True)
    return df
def rec(N, tot, number, isG=False):
    """Digit-extension recursion: extend *number* one digit at a time
    while the digit-sum divisibility chain holds, summing primes found
    along the way (Miller-Rabin tested)."""
    total = 0
    for digit in xrange(0, 10):
        candidate = number * 10 + digit
        if isG and Library.miller_rabin(candidate, 10):
            total += candidate
        if candidate % (tot + digit) != 0:
            continue
        quotient = candidate / (tot + digit)
        if N < lim - 1:
            if N != 1:
                if Library.miller_rabin(quotient, 10):
                    total += rec(N + 1, tot + digit, candidate, True)
                else:
                    total += rec(N + 1, tot + digit, candidate)
            else:
                total += rec(N + 1, tot + digit, candidate)
    return total
def checkLoadValues(resourceType, distArgs):
    """Reject load values that approach the resource's system limit.

    Returns an error string when any requested start/stop load exceeds
    90% of the available limit; False when all values are acceptable.
    """
    maxResourceLoad = Library.getResourceLimit(resourceType)
    requested = []
    for key in ("startload", "stopload"):
        if distArgs.has_key(key):
            requested.append(int(distArgs[key]))
    #Subtracts current MEM usage from maximum system MEM
    if resourceType.upper() == "MEM":
        maxResourceLoad -= Library.getMemUsed()
    for load in requested:
        if load > (maxResourceLoad * 0.9):
            return resourceType.upper() + " close to maximum value. Re-send with force (-f) to run"
    return False
def RemoveIncludes(root: ET.Element, xmlns):
    """Strip every ClInclude entry from attribute-less ItemGroups,
    removing any group left empty afterwards."""
    groups = []
    lb.XML_GetSubElements(groups, root, 'ItemGroup', xmlns)
    emptied = []
    for group in groups:
        # groups carrying attributes (e.g. conditions) are left alone
        if len(group.attrib) > 0:
            continue
        doomed = [child for child in group
                  if lb.XML_GetTag(child, xmlns) == 'ClInclude']
        for child in doomed:
            group.remove(child)
        if len(group) == 0:
            emptied.append(group)
    for group in emptied:
        root.remove(group)
def start_recognition(self): """ stream_volume = set link user volume """ lan = Library.get_iris_conf('language') stream_state = 0 ;brocast_time = 0 ;wifi_count = 0 ;quantenna_time = 0 ;Line = 1 ;trigger_state = 0 ;talk = 0 ;tmp_voip = 0 ;party_mode = 0 ;stream_volume = 3 Library.start_program_wait("/usr/bin/CSpotterDemo_x86 /usr/bin/Trigger.bin&") while True: #step 1 : check iris trigger_state , angle = cspotter_check(lan) #step 2 : check voip voip_state = Library.file_get('/tmp/file/sound_busy_check') #step 3 : check party mode or ann stream_state = stream_check(stream_state , stream_volume) if trigger_state == 1 and voip_state == 9 :
def get(self):
    """REST handler: remove a monitored file path from monitorFilesList.txt.

    Expects a 'filePath' query parameter; returns an error dict when it
    is absent, empty, or not currently monitored, and a success dict
    after deleting the matching line.
    """
    # SECURITY(review): request.args['filePath'] is interpolated into a
    # shell command unescaped — a crafted path can inject commands.
    # It should be quoted (e.g. shlex.quote) or handled in Python.
    if 'filePath' in request.args:
        if request.args['filePath'] == "":
            return {'Error': 'Empty parameters'}
        else:
            # find the 1-based line number of the exact path match
            lineNumber, _, _, _ = Library.commandExecute(
                "grep -n " + request.args['filePath'] + "$ monitorFilesList.txt |sed 's/:/ /' |awk '{print $1}'")
            if lineNumber.strip() == "":
                return {"Error": "No such file path"}
            else:
                # delete that line in place
                Library.commandExecute("sed -i -e '" + lineNumber.strip() + "d' monitorFilesList.txt")
                return {"Success": "File removed from monitoring"}
    else:
        return {'Error': 'Data not passed'}
def rec(N, tot, number, isG=False):
    """Recursive digit-extension search (formatted twin of the helper
    above): sums Miller-Rabin primes found while the divisibility chain
    on the running digit sum holds."""
    acc = 0
    base = number * 10
    for d in xrange(10):
        n = base + d
        s = tot + d
        if isG and Library.miller_rabin(n, 10):
            acc += n
        if n % s:
            continue
        q = n / s
        if N < lim - 1:
            if N != 1:
                # next level is "good" exactly when the quotient is prime
                acc += rec(N + 1, s, n, Library.miller_rabin(q, 10))
            else:
                acc += rec(N + 1, s, n)
    return acc
def _set_library_num(self, num):
    """Record the library number and, when no id is set yet, resolve
    the matching library id (empty for non-positive numbers)."""
    self._library_num = num
    if self.library_id != '':
        return
    if num > 0:
        self.library_id = Library.Library(self.library_num).id
    else:
        self.library_id = ''
def getArgs(xmlStr, moduleType, moduleName, resourceType):
    """Extract and validate a module's argument values from the XML.

    For each declared argument the XML value is pulled, percent-based
    memory stress values are converted to absolute bytes, bounds are
    checked via Library.boundsCompare, and accepted-value lists are
    enforced.

    Returns:
        tuple: (dict of arg name -> validated value,
                list interleaving bounds-check notes and values).

    Raises:
        Exception: on an out-of-range value or unusable XML.
    """
    moduleArgsDict = {}
    moduleArgsNotes = []
    checkNote = "\nOK"
    moduleArgs = getModuleArgs(moduleType, moduleName, resourceType)
    moduleMethod = getArgsModule(moduleName, moduleType)
    if (type(moduleMethod) is str):
        # a string here is an error message from getArgsModule
        print moduleMethod
        sys.exit(0)
    else:
        if type(moduleArgs) is list:  #If the list is returned
            for arg in moduleArgs:
                # each entry is (name, spec-dict)
                argDict = arg[1]
                arg = arg[0].lower()
                xmlArg = getXMLData(xmlStr, arg, "")
                #Convert stress values mem to real values (if given in %)
                if ((moduleType.lower() == "distribution") and (resourceType == "mem") and (str(xmlArg[-1]) == "%")):
                    sysMemory = Library.getTotalMem()
                    xmlArg = (int(str(xmlArg[:-1])) * sysMemory) / 100
                (xmlArg, checkNote) = Library.boundsCompare(xmlArg, argDict, arg)
                if ('accepted' in argDict):
                    if not (Library.checkAcceptedArg(xmlArg, argDict['accepted'])):
                        raise Exception("\nXML Error: The value for " + arg + " was not in the accepted range (" + str(argDict['accepted']) + ")")
                moduleArgsNotes.append(checkNote)
                moduleArgsNotes.append(xmlArg)
                moduleArgsDict.update({arg: xmlArg})
        else:
            errorStr = moduleArgs + "\nXML Error: Cannot get " + moduleType + " arguments, check if 'href' and 'name' exist in XML"
            xmlLogger.error(errorStr)
            raise Exception(errorStr)
    return (moduleArgsDict, moduleArgsNotes)
def centroid(symptom_set):
    """Return the spreading-activation centroid for the given symptom keys."""
    keywords = [key for key in symptom_set]
    return ctd.spreading_activation_centroid(G, keywords)
def setDistrType(self, distrType):
    """Return *distrType* when a distribution module of that name
    exists (case-insensitive); raise otherwise."""
    matches = [d for d in Library.listDistributions("all")
               if d.lower() == distrType.lower()]
    if matches:
        return distrType
    raise Exception(
        "Distribution module '%s' does not exist. Check the name" %
        (distrType))
def toString(self):
    """Human-readable description of this entity, keyed on self.type."""
    kind = self.type
    if kind == "Enemy":
        return (self.name + " has " + str(self.attr1)
                + " hitpoints and does " + str(self.attr2) + " damage!")
    if kind == "Key":
        return self.name + " opens door " + str(self.attr1)
    if kind == "Potion":
        return self.name
    if kind == "item":
        # weapons report damage (attr1), armors defense (attr2)
        if self.name in Library.getWeaponsList():
            return self.name + " which does " + str(self.attr1) + " damage"
        if self.name in Library.getArmorsList():
            return self.name + " which does " + str(self.attr2) + " defense"
        return self.name
    return self.name
def createCustomJob(self, emulationID, distributionID, emulationLifetimeID, duration, emulator, emulatorArg, resourceTypeDist, stressValue, runNo, PROCNAME, emuDuration):
    """Schedule a one-shot "custom" run of *emulator* via the scheduler.

    Returns:
        1 on successful scheduling, 2 when a process named PROCNAME is
        already running, 0 on a scheduler error (logged).
    """
    schedFileLogger.debug("-> createCustomJob(self,emulationID,distributionID,emulationLifetimeID,duration,emulator,emulatorArg,resourceTypeDist,stressValue,runNo,PROCNAME,emuDuration)")
    distributionName = emulator + "customJob"
    # Refuse to double-start: an existing PROCNAME process means a job is live.
    if Library.checkPid(PROCNAME):
        return 2
    else:
        try:
            # (dead code) an earlier variant computed the run date via
            # time.strftime over nested Library time helpers.
            # Schedule the run "now" (emulationNow(2) converted to a date).
            self.sched.add_date_job(Job.createRun, Library.timeConv(Library.emulationNow(2)), args=[emulationID, distributionID, emulationLifetimeID, duration, emulator, emulatorArg, resourceTypeDist, stressValue, runNo, emuDuration], name=str(emulationID) + distributionName + "-" + str(distributionID) + "-" + str(runNo) + "-" + distributionName + "-" + str(emulator) + "-" + str(resourceTypeDist) + ": " + str(stressValue))
            schedFileLogger.info("Created Custom Job: " + str(emulationID) + distributionName + "-" + str(distributionID) + "-" + str(runNo) + "-" + distributionName + "-" + str(emulator) + "-" + str(resourceTypeDist) + ": " + str(stressValue))
            return 1
        except Exception, e:
            schedFileLogger.debug("Values:" + str(emulationID) + "-" + str(distributionID) + "-" + str(distributionName) + "-" + str(emulationLifetimeID) + "-" + str(duration) + "-" + str(emulator) + "-" + str(emulatorArg) + "-" + str(resourceTypeDist) + "-" + str(stressValue) + "-" + str(runNo) + "-" + str(emuDuration))
            # NOTE(review): message typo "reateCustomJob" kept verbatim
            # (runtime string must not change in a doc-only edit).
            schedFileLogger.error("Scheduler reateCustomJob(): error creating Job check values")
            schedFileLogger.exception(str(e))
            return 0
def createEndJob(daemon, newEmulation):
    """Schedule the job that terminates the emulation after its runtime.

    End time = emulation start + declared duration + 1 s of slack to
    allow for scheduler start-up delay.
    """
    label = (str(newEmulation.emulationID) + "-" + newEmulation.emulationName)
    start = Library.timeConv(newEmulation.startTimeEmu)
    slack = dt.timedelta(0, (float(newEmulation.stopTimeEmu) + 1))
    emulationEndJobReply = daemon.createEmulationEndJob(start + slack, label)
def unpacking_mis(file_path):
    """Parse a gb2312 MIS log, reassembling the write and read messages.

    Returns:
        tuple: (str_write, str_reads, rev_buf) — the concatenated write
        message, the concatenated read message, and the first matched
        'recv buf' line (each may be None when absent).
    """
    str_reads = None
    str_write = None
    if not os.path.isfile(file_path):
        print "WARNING: file " + file_path + " don't exist."
    with codecs.open(file_path, 'r', encoding='gb2312', errors='ignore') as log:
        # after collecting the read/write fragments, join() them and
        # strip whitespace
        reads = []  # empty container for read message fragments
        writes = []  # empty container for write message fragments
        # track whether we are inside a read or a write message
        read = False
        write = False
        rev_buf = None
        for line in log.readlines():
            pid = Library.ID(line)
            if rev_buf is None:
                rev_buf = Library.DIYSearch(
                    Configuration.mis_clt_key_words['recv buf'], line)
            message_head = Library.message_head(line)
            if pid:
                # line starts with a pid header: either a message-type
                # marker or the end of the stream
                # flip state when a message-type keyword first appears
                if 'write2 .....' in line and write is False:
                    write = True
                    if read is True:
                        read = False  # release the read flag to save time
                elif 'read .....' in line and read is False:
                    read = True
                    if write is True:
                        write = False
                elif 'read from MISP len' in line and read != 0:
                    # stream complete: join fragments and strip newlines
                    str_write = "".join(writes).replace("\n", "")
                    str_reads = "".join(reads).replace("\n", "")
                    return str_write, str_reads, rev_buf
            else:
                # not a pid header
                if message_head:
                    # can only be a detected message head; route the
                    # payload to whichever message is currently open
                    if write:
                        # print("W:" + line)
                        writes += str(
                            get_pure_8583(
                                Library.message_head(line) + ": ",
                                Library.message_tail(line), line)).split(" ")
                    if read:
                        # print("R:" + line)
                        reads += str(
                            get_pure_8583(
                                Library.message_head(line) + ": ",
                                Library.message_tail(line), line)).split(" ")
    return str_write, str_reads, rev_buf
def get_Jobs(): jobList = Library.getJobList() jobListLength = len(jobList) listSplitIndex = -1 currentJobList = [] for index, job in enumerate(jobList): if job.find("Currently running jobs") >= 0: listSplitIndex = index if listSplitIndex != -1: #Splits into two lists, if there are jobs currently running currentJobList = jobList[listSplitIndex + 1:jobListLength] jobList = jobList[0:listSplitIndex] jobListLength = len(jobList) + len(currentJobList) def jobsToXML(jobList, currentJobList, totalJobs): #Converts job lists to xml foramt ET.register_namespace("jobs", "http://127.0.0.1/cocoma") response.set_header('Content-Type', 'application/vnd.bonfire+xml') response.set_header('Accept', '*/*') response.set_header('Allow', 'GET') jobXmlRoot = ET.Element('collection', { 'xmlns': 'http://127.0.0.1/cocoma', 'href': '/jobs' }) jobCollection = ET.SubElement(jobXmlRoot, 'items', { 'offset': '0', 'total': str(totalJobs) }) for job in jobList: jobXML = ET.SubElement(jobCollection, "Job") jobXML.text = job if len(currentJobList) > 0: for currentJob in currentJobList: currentJobXML = ET.SubElement(jobCollection, "currentlyRunningJob") currentJobXML.text = str(currentJob) lk = ET.SubElement(jobXmlRoot, 'link', { 'rel': 'parent', 'href': '/', 'type': 'application/vnd.bonfire+xml' }) return prettify(jobXmlRoot) # jobXML = jobsToXML(jobList, currentJobList, jobListLength) # response.status = 200 # return jobXML try: jobXML = jobsToXML(jobList, currentJobList, jobListLength) response.status = 200 return jobXML except Exception, e: response.status = 400 return "<ERROR>Unable to get job list: " + e + " </ERROR>"
def startAPI(IP_ADDR, PORT_ADDR): if Library.daemonCheck() == 0: print "\n---Check if Scheduler Daemon is started. Connection error---" sys.exit(0) print "API IP address:", IP_ADDR API_HOST = run(host=IP_ADDR, port=PORT_ADDR) return IP_ADDR
def task2():
    """Populate a small library and print every book's id, repr and tag."""
    lib = Library(1, '51 Some str., NY')
    # Add the two sample books in order.
    for author, title in (('Leo Tolstoi', 'War and Peace'),
                          ('Charles Dickens', 'David Copperfield')):
        lib += Book.Book(author, title)
    for book in lib:
        print(book._id)
        print(book)
        print(book.tag())
def startAPI(IP_ADDR,PORT_ADDR):
    """Start the REST API server on IP_ADDR:PORT_ADDR.

    Exits the process when the Scheduler daemon is not running
    (Library.daemonCheck() == 0); otherwise blocks in run() and
    returns IP_ADDR once the server loop ends.
    """
    if Library.daemonCheck() ==0:
        # Daemon is down: nothing to serve, bail out.
        print "\n---Check if Scheduler Daemon is started. Connection error---"
        sys.exit(0)
    print"API IP address:",IP_ADDR
    API_HOST=run(host=IP_ADDR, port=PORT_ADDR)  # blocks until the server stops
    return IP_ADDR
def test_return_library_item(self):
    """Returning items: unknown id, already-shelved item, checked-out item."""
    book = Book("345", "Phantom Tollbooth", "Juster")
    album = Album("456", "...And His Orchestra", "The Fastbacks")
    movie = Movie("567", "Laputa", "Miyazaki")  # deliberately never added
    felicity = Patron("abc", "Felicity")
    waldo = Patron("bcd", "Waldo")
    lib = Library()
    lib.add_library_item(book)
    lib.add_library_item(album)
    lib.add_patron(felicity)
    lib.add_patron(waldo)
    lib.check_out_library_item("bcd", "456")
    self.assertEqual(lib.return_library_item("567"), "item not found")
    self.assertEqual(lib.return_library_item("345"), "item already in library")
    self.assertEqual(lib.return_library_item("456"), "return successful")
    self.assertEqual(album.get_location(), ON_SHELF)
def main(): library = Library() library.read_book_collection() print len(library.collection), 'books in collection.' print "Ready for input. Type 'help()' for a list of commands.\n" command = '\0' while command != 'quit()': try: command = raw_input('Library command: ').strip() if len(command) == 0: print "What? Speak up!\n" else: eval('library.' + command) print library.response library.response = '' except AttributeError, e: print "Sorry, I didn't understand:", command print "Type 'help()' for a list of the things I do understand.\n" except Exception, e: print "Unexpected error:", e
def __eq__(self, other):
    """Compare this Constraint with another for (approximate) equality.

    Delegates entirely to sh.compare_class, which performs the
    field-by-field comparison.

    Args:
        other (Constraint): object to compare against.

    Returns:
        bool: True when the two objects compare as close to equal.
    """
    return sh.compare_class(self, other)
def one_letter_code(self):
    """Return the sequence as a one-letter-code string ('X' for unknowns)."""
    letters = []
    for three_letter in self.sequence_list:
        desc = Library.library_get_monomer_desc(three_letter)
        # Fall back to 'X' when the monomer is unknown or has no code.
        known = desc is not None and desc.one_letter_code
        letters.append(desc.one_letter_code if known else "X")
    return "".join(letters)
def setUp(self):
    """Build the shared calendar/library fixture with nine checked-out books."""
    global cal
    cal = Calendar()
    global library
    library = Library()
    library.read_book_collection()
    collection = library.get_collection()
    # Days-checked-out for the first nine books, in order.
    checkout_days = (1, 2, 2, 20, 20, 20, 1, 5, 4)
    for slot, days in enumerate(checkout_days):
        collection[slot].check_out(days)
    borrowers = [Patron("Amy Gutmann"), Patron("Ryan Smith"),
                 Patron("Steve Schenkle"), Patron("Dr Dave")]
    borrowers[0].set_books(collection[0:3])
    borrowers[1].set_books(collection[3:6])
    borrowers[2].set_books(collection[6:9])
    library.set_patrons(borrowers)
def dbWriter (distributionID,runNo,message,executed): try: conn =Library.dbconn() c = conn.cursor() c.execute('UPDATE runLog SET executed=? ,message=? WHERE distributionID =? and runNo=?',(executed,message,distributionID,runNo)) conn.commit() except sqlite.Error, e: c.close() conn.close() schedFileLogger.debug("Values: "+str(distributionID)+"-"+str(runNo)+"-"+str(message)+"-"+str(executed)) schedFileLogger.error("Unable to connect to DB") schedFileLogger.exception(str(e)) sys.exit(1)
def logLevelGet():
    """Translate the configured "coreloglevel" into a logging constant.

    "debug" maps to logging.DEBUG; anything else — including "info" and
    unrecognised values — falls back to logging.INFO.
    """
    configured = Library.readLogLevel("coreloglevel")
    if configured == "debug":
        return logging.DEBUG
    return logging.INFO
def __init__(self, hermes, username):
    """Initialise the user: key, library, and saved playlists from disk."""
    self.username = username
    self.enc_key = "private_key"
    self.playlists = []
    self.library = Library(self)
    self.load()
    # Pick up every saved playlist file from the user directory.
    for entry in os.listdir(Settings.pathman["user"]):  #>? move to library?
        if entry.startswith("playlist_"):
            self.playlists.append(Playlist(entry, self))
def __init__(self,emulationID,distributionID,emulationLifetimeID,resourceTypeDist,duration,emulatorArg, stressValues,runNo,emuDuration):
    """Launch a network-fuzzing distribution run via backfuzz.

    Exits the process when the configured backfuzz path is invalid.
    For "net" distributions, spawns fuzzLoad in a child process,
    records its pid, and blocks until the child finishes.
    """
    if not (os.path.isfile(Library.readBackfuzzPath())):
        print "Backfuzz_path is incorrect. Use 'ccmsh -b' to set it"
        sys.exit(0)
    if resourceTypeDist.lower() == "net":
        # emulatorArg is assumed to be a dict carrying the keys used below
        # ("min", "fuzzrange", "serverip", ...) — TODO confirm against caller.
        netFuzzProc = multiprocessing.Process(target = fuzzLoad, args=(emulationID, distributionID, runNo, emulatorArg["min"],emulatorArg["fuzzrange"], emulatorArg["serverip"], emulatorArg["serverport"], emulatorArg["protocol"], emulatorArg["salt"], emulatorArg["timedelay"]))
        netFuzzProc.start()
        # Record the child's pid (presumably so the scheduler can manage it).
        wrtiePIDtable (netFuzzProc.pid, "Scheduler")
        #Stops event based scheduling once EMU time expires
        print(netFuzzProc.is_alive())
        netFuzzProc.join()  # block until the fuzzing child exits
def start_test():
    ''' Execute an existing emulation XML from "/tests" folder '''
    ET.register_namespace("test", "http://127.0.0.1/cocoma")
    response.set_header('Content-Type', 'application/vnd.bonfire+xml')
    response.set_header('Accept', '*/*')
    response.set_header('Allow', 'GET, HEAD, POST')
    emulationID=""
    fileName_stream =request.files.data
    fileName_stream_body =request.body.read()
    if fileName_stream:
        try:
            # The request body names an XML file under HOMEPATH/tests.
            filename=HOMEPATH+"/tests/"+str(fileName_stream_body)
            #check if file exists maybe?
        except Exception,e:
            print e
            response.status = 400
            return "<error>"+str(e)+"</error>"
        #print "File data detected:\n",fileName_stream
        #return fileName_stream
    # NOTE(review): when no file data was uploaded, `filename` is never
    # bound, so xmlFileParser() raises NameError — caught below and
    # misreported as a parse error.  Confirm whether an explicit 400 for
    # the missing-upload case is wanted.
    try:
        (emulationName,emulationType,emulationLog,emulationLogFrequency,emulationLogLevel, resourceTypeEmulation, startTimeEmu,stopTimeEmu, distroList,xmlData, MQproducerValues) = XmlParser.xmlFileParser(filename, True)
        if startTimeEmu.lower() =="now":
            # "now" is resolved to a concrete start time before scheduling.
            startTimeEmu = Library.emulationNow(2)
            emulationID=EmulationManager.createEmulation(emulationName,emulationType,emulationLog,emulationLogFrequency,emulationLogLevel, resourceTypeEmulation, startTimeEmu,stopTimeEmu, distroList,xmlData, MQproducerValues)
        else:
            emulationID=EmulationManager.createEmulation(emulationName,emulationType,emulationLog,emulationLogFrequency,emulationLogLevel, resourceTypeEmulation, startTimeEmu,stopTimeEmu, distroList,xmlData, MQproducerValues)
    except Exception,e:
        print e
        response.status = 400
        return "<error>Cannot parse:"+str(e)+"</error>"
    # NOTE(review): the success path returns None (empty response body)
    # and never uses `emulationID` — confirm whether it should be returned.
def __init__(self, ui):
    """Wire up the player: media backend, worker queue, library, helpers."""
    MPlayer.populate()
    # Handle actions asynchronously through a daemonised worker thread.
    self.player_q = queue.Queue()
    worker = threading.Thread(target=Player.go, args=(self.player_q,))
    worker.daemon = True
    worker.start()
    # Mostly static collaborators.
    self.ui = ui
    self.lib = Library()
    self.notifier = Notifier()
    self.alarm = AlarmClock()
    # Genuinely stateful fields.
    self.lastSong = None
    self.mp = None
def fuzzLoad(emulationID, distributionID, runNo, min, fuzzRange, serverip, serverport, protocol, salt, timedelay):
    """Execute one backfuzz fuzzing job and record its outcome in the DB.

    Spawns backfuzz as a subprocess against serverip:serverport, waits
    for it, writes Success/Error into runLog via dbWriter(), and on
    success asks DistributionManager to schedule the next run.
    Returns True on success, False when zombieBuster reports failure.
    """
    runBackfuzzPidNo=0
    if (timedelay == 0 or timedelay == "0"):
        timedelay = 0.8  # default inter-packet delay when caller passed 0
    try:
        # Echo the exact command line for debugging.
        print "python", BACKFUZZ_PATH, "-h", str(serverip), "-p", str(serverport), "-min", str(min), "-max", str(int(min) + int(fuzzRange)), "-s ", str(salt), "-pl", str(protocol).upper(), "-t", str(timedelay)
        runBackfuzz = subprocess.Popen(["python",BACKFUZZ_PATH, "-h", str(serverip), "-p", str(serverport), "-min", str(min), "-max", str(int(min) + int(fuzzRange)), "-s", str(salt), "-pl", str(protocol).upper(), "-t", str(timedelay), "&&"], cwd= BACKFUZZ_PATH[:-11], stdin=subprocess.PIPE)#,stdout=subprocess.PIPE)
        runBackfuzz.stdin.flush()
        # backfuzz apparently waits for a keypress before starting —
        # TODO confirm; this feeds it a CRLF.
        runBackfuzz.stdin.write("\r\n")
        runBackfuzzPidNo =runBackfuzz.pid
        if zombieBuster(runBackfuzzPidNo, "backfuzz"):
            print "Job failed, sending wait()."
            runBackfuzz.wait()
            message="Error in the emulator execution"
            executed="False"
            dbWriter(distributionID,runNo,message,executed)
            return False
        else:
            print "Success! waiting on process to finish running"
            runBackfuzz.wait()
            print "Process finished, writing into DB"
            message="Success"
            executed="True"
            dbWriter(distributionID,runNo,message,executed)
            print "Process stopped, trying to schedule next job"
            try:
                # Local import, presumably to avoid a circular dependency.
                import Library,DistributionManager
                daemon=Library.getDaemon()
                newEmulation=daemon.getEmuObject(emulationID)
                DistributionManager.createDistributionRuns(newEmulation)
            except Exception,e:
                print "Emulation object error: ",e
            return True
    except Exception, e:
        # NOTE(review): this returns the truthy tuple
        # ("run_backfuzzer job exception: ", e) instead of False/raising —
        # callers testing the result for success will be misled.  Confirm.
        return "run_backfuzzer job exception: ", e
def get_Jobs(): jobList = Library.getJobList() jobListLength = len(jobList) listSplitIndex = -1 currentJobList = [] for index, job in enumerate(jobList): if job.find("Currently running jobs") >= 0: listSplitIndex = index if listSplitIndex != -1: #Splits into two lists, if there are jobs currently running currentJobList = jobList[listSplitIndex+1:jobListLength] jobList = jobList[0:listSplitIndex] jobListLength = len(jobList) + len(currentJobList) def jobsToXML (jobList, currentJobList, totalJobs): #Converts job lists to xml foramt ET.register_namespace("jobs", "http://127.0.0.1/cocoma") response.set_header('Content-Type', 'application/vnd.bonfire+xml') response.set_header('Accept', '*/*') response.set_header('Allow', 'GET') jobXmlRoot = ET.Element('collection', { 'xmlns':'http://127.0.0.1/cocoma','href':'/jobs'}) jobCollection = ET.SubElement(jobXmlRoot, 'items', {'offset':'0','total':str(totalJobs)}) for job in jobList: jobXML = ET.SubElement(jobCollection, "Job") jobXML.text = job if len(currentJobList) > 0: for currentJob in currentJobList: currentJobXML = ET.SubElement(jobCollection, "currentlyRunningJob") currentJobXML.text = str(currentJob) lk = ET.SubElement(jobXmlRoot, 'link', {'rel':'parent', 'href':'/', 'type':'application/vnd.bonfire+xml'}) return prettify(jobXmlRoot) # jobXML = jobsToXML(jobList, currentJobList, jobListLength) # response.status = 200 # return jobXML try: jobXML = jobsToXML(jobList, currentJobList, jobListLength) response.status = 200 return jobXML except Exception, e: response.status = 400 return "<ERROR>Unable to get job list: " + e + " </ERROR>"
def emulationEnd(emulationName): """ IN: job that executes at the end of emulation DOING: just producing logger notification OUT: nothing """ try: print "Emulation Time expired, removing extra jobs and stopping running processes" global emulationEndLogger msg = {"Action":"Emulation finished","EmulationName":str(emulationName)} producer.sendmsg(myName,msg) emulationEndLogger.info(msg) #emulationEndLogger.info("Emulation '"+str(emulationName)+"' finished.") Library.removeExtraJobs(emulationName) Library.killRemainingProcesses() Library.deleteFiles("/tmp/stressapptestFile", "*") # Remove any stressappTest files left behind from I/O loading return True except: return False
def createDistribution(newEmulation):
    """Persist a new emulation and its lifetime in the DB and schedule jobs.

    Inserts rows into "emulation" and "emulationLifetime", prefixes the
    emulation name with its new DB id, optionally schedules a periodic
    logger job, and creates the end-of-emulation job.
    """
    daemon=Library.getDaemon()
    global producer
    if producer is None:
        # print "In distributionManager, copying producer"
        # NOTE(review): `producer()` on a None-valued global would raise
        # TypeError — was `Producer()` intended here?  Confirm.
        producer = producer()
    # print "Who calls "+sys._getframe(0).f_code.co_name+": "+sys._getframe(1).f_code.co_name
    # distLoggerDM = ""
    # if distLoggerDM is None:
    #     distLoggerDM=Library.loggerSet("Distribution Manager",str(newEmulation.emulationID)+"-"+str(newEmulation.emulationName)+"-syslog"+"_"+str(newEmulation.startTimeEmu)+".csv")
    # Connect to the DB and store the emulation parameters.
    loggerJobReply = "No logger scheduled"
    try:
        conn = Library.dbconn()
        c = conn.cursor()
        # 1. Populate "emulation".
        c.execute('INSERT INTO emulation (emulationName,emulationType,resourceType,active,logging,logFrequency,logLevel,xmlData) VALUES (?, ?, ?, ?, ?, ?, ?, ?)', [newEmulation.emulationName, newEmulation.emulationType, newEmulation.resourceTypeEmulation, 1, newEmulation.emulationLog, newEmulation.emulationLogFrequency, newEmulation.emulationLogLevel, newEmulation.xmlData])
        newEmulation.setEmulationID(c.lastrowid)
        # start logger here once we know emulation ID
        # distLoggerDM = singleLogger("Distribution Manager", None, str(newEmulation.emulationID) + "-" + str(newEmulation.emulationName) + "-syslog" + "_" + str(newEmulation.startTimeEmu) + ".csv")
        # Prefix the stored name with the freshly assigned DB id.
        c.execute('UPDATE emulation SET emulationName=? WHERE emulationID =?', (str(newEmulation.emulationID) + "-" + newEmulation.emulationName, newEmulation.emulationID))
        # 2. Populate the "emulationLifetime" table.
        c.execute('INSERT INTO emulationLifetime (startTime,stopTime,emulationID) VALUES (?,?,?)', [newEmulation.startTimeEmu, newEmulation.stopTimeEmu, newEmulation.emulationID])
        emulationLifetimeID = c.lastrowid
        newEmulation.setEmulationLifetimeID(emulationLifetimeID)
        c.execute('UPDATE emulationLifetime SET stopTime=? WHERE emulationLifetimeID =?',(newEmulation.stopTimeEmu,emulationLifetimeID))
        c.execute('UPDATE emulation SET emulationLifetimeID=? WHERE emulationID=?',(emulationLifetimeID,newEmulation.emulationID))
        conn.commit()
        c.close()
        """
        # Here we create runs
        """
        # raise Exception ("newEmulation.emulationLog = " + str(newEmulation.emulationLog))
        if newEmulation.emulationLog == "1":
            # Create a run for the logger with the configured probe interval.
            distLoggerDM = singleLogger("Distribution Manager", None, str(newEmulation.emulationID) + "-" + str(newEmulation.emulationName) + "-syslog" + "_" + str(newEmulation.startTimeEmu) + ".csv")
            interval = int(newEmulation.emulationLogFrequency)
            singleRunStartTime = Library.timeConv(newEmulation.startTimeEmu)
            loggerJobReply = daemon.createLoggerJob(singleRunStartTime, newEmulation.stopTimeEmu, interval, newEmulation.emulationID, newEmulation.emulationName, newEmulation.startTimeEmu)
        createEndJob(daemon, newEmulation)
    except sqlite.Error, e:
        print e
        return "SQL error:", e
        # NOTE(review): unreachable — the return above always fires first.
        sys.exit(1)
def dateOverlapCheck(self, startTime, stopTime):
    """Validate a prospective emulation window against existing ones.

    startTime is a wall-clock start; stopTime appears to be a duration
    in seconds added to the start — TODO confirm.  Rejects windows in
    the past or with start >= stop, then scans emulationLifetime rows
    for overlap.
    """
    startTimeSec = Library.timestamp(Library.timeConv(startTime))
    stopTimeSec = startTimeSec + float(stopTime)
    # print startTimeSec
    # print stopTimeSec
    dtNowSec = Library.timestamp(dt.now())
    # print "dt.now():",dt.now()
    # print "dtNow:",dtNowSec
    if startTimeSec <= dtNowSec or stopTimeSec <= dtNowSec:
        print "Error: Dates cannot be in the past"
        return "Error: Dates cannot be in the past"
    if startTimeSec >= stopTimeSec:
        print "Start Date cannot be the same or later than stop time"
        # NOTE(review): returns None here, unlike the string returned above
        # — callers comparing against a message will not see this failure.
        return
    n = "1"
    try:
        conn = Library.dbconn()
        c = conn.cursor()
        c.execute('SELECT startTime, stopTime FROM emulationLifetime')
        emulationLifetimeFetch = c.fetchall()
        if emulationLifetimeFetch:
            for row in emulationLifetimeFetch:
                # print row
                startTimeDBsec = Library.timestamp(Library.timeConv(row[0]))
                stopTimeDBsec = startTimeDBsec + float(row[1])
                # Overlap cases: new start inside, new stop inside, or the
                # new window fully enclosing an existing one.
                if startTimeSec >= startTimeDBsec and startTimeSec <= stopTimeDBsec:
                    n = "Emulation already exist for this date change the date(1)"
                if stopTimeSec >= startTimeDBsec and stopTimeSec <= stopTimeDBsec:
                    n = "Emulation already exist for this date change the date(2)"
                if startTimeSec <= startTimeDBsec and stopTimeSec >= stopTimeDBsec:
                    n = "Emulation already exist for this date change the date(3)"
        else:
            pass
        conn.commit()
        c.close()
        # NOTE(review): `n` is computed but never returned — the success
        # path falls off the end and yields None.  Confirm whether a
        # trailing `return n` is missing.
    except Exception, e:
        print "dateOverlapCheck() SQL Error %s:" % e.args[0]
        print e
        return str(e)
def checktoexit():
    """Poll Library.alive() every 3 s; when it goes false, hard-exit."""
    while Library.alive():
        sleep(3)
    # BUG FIX: the original printed "QUIT" AFTER os._exit(1), where it
    # could never execute; announce the shutdown before exiting.
    print("QUIT")
    os._exit(1)
from threading import Thread
from time import sleep
import os
# BUG FIX: socket and sys are used below but were never imported.
import socket
import sys

# BUG FIX: Library is referenced but was never imported in this script.
import Library


def checktoexit():
    """Poll Library.alive() every 3 s; when it goes false, hard-exit."""
    while Library.alive():
        sleep(3)
    # BUG FIX: the original printed "QUIT" AFTER os._exit(1), where it
    # could never execute; announce the shutdown before exiting.
    print("QUIT")
    os._exit(1)

t = Thread(target=checktoexit)
t.start()

# Simple TCP sink: echo whatever each client sends onto stdout.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('localhost', 5558)
sock.bind(server_address)
sock.listen(1)
while Library.alive():
    connection, client_address = sock.accept()
    try:
        while True:
            data = connection.recv(8)
            sys.stdout.flush()
            if data:
                sys.stdout.write(data)
            else:
                break
    finally:
        # Always release the per-client connection.
        connection.close()
import Library update_id = Library.update(1)['result'][0]['update_id'] a = update_id while 1: update = Library.update(-1) if a != update['result'][0]['update_id']: print(update['result'][0]['update_id']) if update['result'][0]['update_id'] - a != 1: print('loss:', update['result'][0]['update_id'] - a - 1) a = update['result'][0]['update_id'] if update_id != update['result'][0]['update_id']: chat_id = update['result'][0]['message']['chat']['id'] chat_id = str(chat_id) #start if update['result'][0]['message']['text'] == '/start': Library.send_start_message(chat_id) update_id = update['result'][0]['update_id'] continue #help if update['result'][0]['message']['text'] == '/help':