def dictionary():
    """Populate list_source_destination and list_keys from the loaded dataset.

    Scans line_dataset for the last line that is NOT a run of digits (the
    section header separating station names from edges), then parses every
    following "src dst weight" line into (int, int, float) tuples.

    Relies on module-level globals: re, line_dataset, list_source_destination,
    list_keys.  (NOTE(review): assumed to be defined elsewhere in this file.)
    """
    # Python 3: range() is lazy like the old xrange(); the original comment's
    # memory concern no longer applies.
    line_temp = 0  # fallback so we don't raise NameError if every line is numeric
    for i in range(len(line_dataset) - 1):
        if not re.match(r'([\d]+)', line_dataset[i]):
            # Last non-numeric line wins, matching the original's behavior.
            line_temp = i
    for i in range(line_temp + 1, len(line_dataset) - 1):
        # Split each edge line once instead of three times.
        fields = line_dataset[i].strip().split(" ")
        list_source_destination.append(
            (int(fields[0]), int(fields[1]), float(fields[2])))
        list_keys.append(int(fields[0]))
def mul(n1, n2):
    """Multiply two non-negative numbers given as digit lists (most significant first).

    Returns the product as a digit list of length len(n1) + len(n2),
    possibly with leading zeros (same contract as the original).

    Fixes over the original:
    - range() instead of Python-2 xrange()
    - integer carry division (// not /, which is float division in Python 3
      and corrupted every carried digit)
    - no longer reverses the caller's lists in place
    - debug print removed
    """
    a = list(reversed(n1))  # least-significant digit first for positional math
    b = list(reversed(n2))
    out = [0] * (len(a) + len(b))
    # Schoolbook multiplication: digit i * digit j lands at position i + j.
    for i in range(len(a)):
        for j in range(len(b)):
            out[i + j] += a[i] * b[j]
    # Propagate carries left-to-right; the top digit never overflows because
    # the product of m- and n-digit numbers has at most m + n digits.
    for k in range(len(out)):
        if out[k] > 9:
            out[k + 1] += out[k] // 10
            out[k] = out[k] % 10
    out.reverse()
    return out
def dijAlgo(lists_sources_lists):
    """Build graph_dijkstra: map each node key to its list of (destination, weight).

    Reads module globals list_keys and list_source_destination and writes
    graph_dijkstra.  The `lists_sources_lists` argument is used as the
    initial scratch accumulator (kept for interface compatibility).

    NOTE(review): iterates k over range(len(list_keys)), not over the key
    values themselves — assumes keys are 0..len-1; confirm against the loader.
    """
    for k in range(len(list_keys)):
        for unit in list_source_destination:
            # unit is (source, destination, weight).
            if unit[0] == k:
                lists_sources_lists.append((unit[1], unit[2]))
        if lists_sources_lists:  # only record nodes that have outgoing edges
            graph_dijkstra[k] = lists_sources_lists
            lists_sources_lists = []
def genTestData(es):
    """Index 10000 random test event documents into the "es_event" index.

    Each document carries a random event name, car id (car000000..car000010)
    and a random date between 2020 and 2022 (days capped at 28 to stay valid
    in every month).

    :param es: an Elasticsearch client exposing .index(index=..., body=...)

    Relies on module-level helpers: __getEpochtimeDay, random, logging.
    """
    eventnamelst = [
        'Acceleration', 'Deceleration', 'Sharp_Turn', 'LDWS', 'OverSpeed',
        'OverEngine', 'Panic', 'Gsensor',
        'Vloss_1', 'Vloss_2', 'Vloss_3', 'Vloss_4',
        'Vloss_5', 'Vloss_6', 'Vloss_7', 'Vloss_8',
        'SSD_Error', 'SD_Error', 'System_Error',
        'ACC_ON', 'ACC_OFF',
        'Alarm_1', 'Alarm_2', 'Alarm_3', 'Alarm_4',
        'Alarm_5', 'Alarm_6', 'Alarm_7', 'Alarm_8',
        'USER_DOWNLOAD',
    ]
    eventlen = len(eventnamelst) - 1  # randint's upper bound is inclusive
    carlen = 10
    index_name = "es_event"
    utctime, strtime = __getEpochtimeDay(2021, 5, 1)
    logging.info(f"utctime={utctime}")
    for _ in range(10000):  # range(), not Python-2 xrange()
        randev = random.randint(0, eventlen)
        randcar = random.randint(0, carlen)
        randyear = random.randint(2020, 2022)
        randmonth = random.randint(1, 12)
        randday = random.randint(1, 28)  # 28 keeps the day valid for February
        utctime, strtime = __getEpochtimeDay(randyear, randmonth, randday)
        testdata = {
            "date": utctime,
            "hdate": strtime,
            "car_uid": f"car{randcar:06d}",
            "event_name": f"{eventnamelst[randev]}",
            "interval": "month",
            "desc": f"desc-{utctime}"
        }
        es.index(index=index_name, body=testdata)
def execute(self, function, *args, **kwargs):
    """Detect memory leaks in `function` by comparing process RSS across runs.

    Calls the function LOOPS times twice and compares resident memory after
    each batch.  If growth exceeds TOLERANCE, keeps calling for 3 more
    seconds and fails the test only if memory is still rising — otherwise
    the growth is treated as warm-up stabilization.

    Relies on self.call / self.get_mem / self.fail and module globals
    LOOPS, TOLERANCE, gc, time.
    """
    # step 1: warm up — after LOOPS calls, memory use should stabilize.
    for x in range(LOOPS):
        self.call(function, *args, **kwargs)
    del x
    gc.collect()
    rss1 = self.get_mem()
    # step 2: identical workload; steady growth between batches shows up here.
    for x in range(LOOPS):
        self.call(function, *args, **kwargs)
    del x
    gc.collect()
    rss2 = self.get_mem()
    # comparison
    difference = rss2 - rss1
    if difference > TOLERANCE:
        # This doesn't necessarily mean we have a leak yet.
        # At this point we assume that after having called the
        # function so many times the memory usage is stabilized
        # and if there are no leaks it should not increase any
        # more.
        # Let's keep calling fun for 3 more seconds and fail if
        # we notice any difference.
        stop_at = time.time() + 3
        while 1:
            self.call(function, *args, **kwargs)
            if time.time() >= stop_at:
                break
        del stop_at
        gc.collect()
        rss3 = self.get_mem()
        difference = rss3 - rss2
        if rss3 > rss2:
            self.fail("rss2=%s, rss3=%s, difference=%s"
                      % (rss2, rss3, difference))
def get_files_by_file_size(dirname, reverse=False):
    """Return list of file paths in `dirname` sorted by file size.

    :param dirname: directory to scan (non-recursive)
    :param reverse: False = smallest to largest, True = largest to smallest
    :return: list of full file paths; directories and other non-regular
             entries are skipped

    Replaces the original's two index-rewriting loops (and Python-2 xrange)
    with a comprehension and a direct size sort key.
    """
    filepaths = [
        os.path.join(dirname, basename)
        for basename in os.listdir(dirname)
        if os.path.isfile(os.path.join(dirname, basename))
    ]
    # Sort on size directly — no intermediate (path, size) tuples needed.
    filepaths.sort(key=os.path.getsize, reverse=reverse)
    return filepaths
def dataset_with_dict():
    """Load station names from 'metro_paris.txt' into an index -> name dict.

    Skips the first line, reads station lines (dropping each line's 5-char
    numeric prefix) until the '[Edges]' marker, and maps each station's
    position to its name.  Values keep their trailing newline, matching the
    original behavior.

    :return: dict {0: name0, 1: name1, ...}

    Fix: the original opened the file without ever closing it; `with`
    guarantees the handle is released.
    """
    stations = []
    with open('metro_paris.txt', 'r') as f:
        for unit in f.readlines()[1:]:
            if unit.strip() == '[Edges]':  # end of the station-name section
                break
            stations.append(unit[5:])  # drop the 5-character line prefix
    # Convert the list to an index-keyed dictionary.
    return {i: name for i, name in enumerate(stations)}
def call_many_times():
    """Call the target function LOOPS - 1 times, force a GC pass, return RSS.

    NOTE(review): relies on self, function, args, kwargs, LOOPS and gc from
    an enclosing scope not visible here — this reads like a closure lifted
    out of a method; confirm where those names are bound.
    """
    for _ in range(LOOPS - 1):  # range(), not Python-2 xrange()
        self.call(function, *args, **kwargs)
    gc.collect()
    return self.get_mem()
def cpu_affinity_get(self):
    """Return this process's CPU affinity as a list of CPU indices.

    Decodes the bitmask returned by the C extension: CPU i is included
    when bit i of the mask is set (supports up to 64 CPUs).
    """
    bitmask = cext.proc_cpu_affinity_get(self.pid)
    # Comprehension replaces the lambda-assigned-to-a-name (PEP 8 E731)
    # and Python-2 xrange().
    return [i for i in range(64) if (1 << i) & bitmask]
def call_many_times():
    """Call the target function `loops` times, then force a GC pass.

    NOTE(review): self, fun, args, kwargs and loops come from an enclosing
    scope not visible here; confirm where they are bound.
    """
    for _ in range(loops):  # range(), not Python-2 xrange()
        self._call(fun, *args, **kwargs)
    gc.collect()
def get_process_cpu_affinity(self):
    """Return the process CPU affinity as a list of CPU indices (Linux backend).

    Decodes the bitmask from the _psutil_linux extension: CPU i is included
    when bit i of the mask is set (supports up to 64 CPUs).
    """
    bitmask = _psutil_linux.get_process_cpu_affinity(self.pid)
    # Comprehension replaces the lambda-assigned-to-a-name (PEP 8 E731)
    # and Python-2 xrange().
    return [i for i in range(64) if (1 << i) & bitmask]
def get_process_cpu_affinity(self):
    """Return the process CPU affinity as a list of CPU indices (Windows backend).

    Decodes the bitmask from the _psutil_mswindows extension: CPU i is
    included when bit i of the mask is set (supports up to 64 CPUs).
    """
    bitmask = _psutil_mswindows.get_process_cpu_affinity(self.pid)
    # Comprehension replaces the lambda-assigned-to-a-name (PEP 8 E731)
    # and Python-2 xrange().
    return [i for i in range(64) if (1 << i) & bitmask]
# NOTE(review): the statements below duplicate the loop body of genTestData()
# and reference names (randcar, randev, eventnamelst, index_name, es,
# __getEpochtimeDay) that are not defined at module level in this chunk —
# this looks like a stray copy/paste fragment; confirm whether it should be
# deleted or belongs inside genTestData().
randyear = random.randint(2020, 2022)
randmonth = random.randint(1, 12)
randday = random.randint(1, 28)
utctime, strtime = __getEpochtimeDay(randyear, randmonth, randday)
testdata = {
    "date": utctime,
    "hdate": strtime,
    "car_uid": f"car{randcar:06d}",
    "event_name": f"{eventnamelst[randev]}",
    "interval": "month",
    "desc": f"desc-{utctime}"
}
es.index(index=index_name, body=testdata)


def unit_test():
    """Print 12 random (month, day) pairs — manual smoke-test helper."""
    for _ in range(12):  # range(), not Python-2 xrange()
        randmonth = random.randint(1, 12)
        randday = random.randint(1, 28)
        print(f"month={randmonth} randday={randday}")


if __name__ == "__main__":
    es = Elasticsearch(hosts="192.168.83.129", port=9200)
    for _ in range(100):
        genTestData(es)
        sleep(500 / 1000)  # throttle: 0.5 s between batches
    # unit_test()
def unit_test():
    """Print 12 random (month, day) pairs as a quick manual check.

    Months are 1-12; days capped at 28 so every pair is a valid date.
    """
    for _ in range(12):  # range(), not Python-2 xrange(); loop index unused
        randmonth = random.randint(1, 12)
        randday = random.randint(1, 28)
        print(f"month={randmonth} randday={randday}")