Example #1
    def test_get_traced_memory(self):
        # Python allocates some internal objects, so the test must tolerate
        # a small difference between the expected size and the real usage
        max_error = 2048

        # allocate one object
        obj_size = 1024 * 1024
        tracemalloc.clear_traces()
        obj, obj_traceback = allocate_bytes(obj_size)
        size, peak_size = tracemalloc.get_traced_memory()
        self.assertGreaterEqual(size, obj_size)
        self.assertGreaterEqual(peak_size, size)

        self.assertLessEqual(size - obj_size, max_error)
        self.assertLessEqual(peak_size - size, max_error)

        # destroy the object
        obj = None
        size2, peak_size2 = tracemalloc.get_traced_memory()
        self.assertLess(size2, size)
        self.assertGreaterEqual(size - size2, obj_size - max_error)
        self.assertGreaterEqual(peak_size2, peak_size)

        # clear_traces() must reset traced memory counters
        tracemalloc.clear_traces()
        self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))

        # allocate another object
        obj, obj_traceback = allocate_bytes(obj_size)
        size, peak_size = tracemalloc.get_traced_memory()
        self.assertGreaterEqual(size, obj_size)

        # stop() also resets traced memory counters
        tracemalloc.stop()
        self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))
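For reference, a minimal standalone sketch (standard library only, not part of the test suite above) of the current/peak semantics this test exercises:

import tracemalloc

tracemalloc.start()
data = bytearray(1024 * 1024)                  # allocate ~1 MiB
current, peak = tracemalloc.get_traced_memory()
assert peak >= current >= 1024 * 1024          # both counters see the allocation

data = None                                    # release the object
current2, _ = tracemalloc.get_traced_memory()  # current drops; peak does not

tracemalloc.clear_traces()                     # resets both counters
assert tracemalloc.get_traced_memory() == (0, 0)
tracemalloc.stop()                             # stop() also resets them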
Example #2
    def test_task_memory_threshold(self):
        diff = None
        def log_func():
            nonlocal diff
            size, max_size = tracemalloc.get_traced_memory()
            diff = (size - old_size)

        obj_size  = 1024 * 1024
        threshold = int(obj_size * 0.75)

        old_size, max_size = tracemalloc.get_traced_memory()
        task = tracemalloctext.Task(log_func)
        task.set_memory_threshold(threshold)
        task.schedule()

        # allocate
        obj = allocate_bytes(obj_size)
        time.sleep(MEMORY_CHECK_DELAY)
        self.assertIsNotNone(diff)
        self.assertGreaterEqual(diff, threshold)

        # release
        diff = None
        old_size, max_size = tracemalloc.get_traced_memory()
        obj = None
        time.sleep(MEMORY_CHECK_DELAY)
        size, max_size = tracemalloc.get_traced_memory()
        self.assertIsNotNone(diff)
        self.assertLessEqual(diff, threshold)
Example #3
    def check_track(self, release_gil):
        nframe = 5
        tracemalloc.start(nframe)

        size = tracemalloc.get_traced_memory()[0]

        frames = self.track(release_gil, nframe)
        self.assertEqual(self.get_traceback(),
                         tracemalloc.Traceback(frames))

        self.assertEqual(self.get_traced_memory(), self.size)
Example #4
 def on_epoch_end(self, last_metrics, **kwargs):
     cpu_used, cpu_peak = list(map(lambda x: int(x/2**20), tracemalloc.get_traced_memory()))
     self.peak_monitor_stop()
     gpu_used = gpu_mem_get_used_no_cache() - self.gpu_before
     gpu_peak = self.gpu_mem_used_peak      - self.gpu_before
     # can be negative, due to unreliable peak monitor thread
     if gpu_peak < 0:   gpu_peak = 0
     # since we want the overhead only, subtract delta used if it's positive
     elif gpu_used > 0: gpu_peak -= gpu_used
     # The numbers are deltas in MBs (beginning of the epoch and the end)
     return add_metrics(last_metrics, [cpu_used, cpu_peak, gpu_used, gpu_peak])
Example #5
def tracemalloc_dump() -> None:
    if not tracemalloc.is_tracing():
        logger.warning("pid {}: tracemalloc off, nothing to dump"
                       .format(os.getpid()))
        return
    # Despite our name for it, `timezone_now` always deals in UTC.
    basename = "snap.{}.{}".format(os.getpid(),
                                   timezone_now().strftime("%F-%T"))
    path = os.path.join(settings.TRACEMALLOC_DUMP_DIR, basename)
    os.makedirs(settings.TRACEMALLOC_DUMP_DIR, exist_ok=True)

    gc.collect()
    tracemalloc.take_snapshot().dump(path)

    procstat = open('/proc/{}/stat'.format(os.getpid()), 'rb').read().split()
    rss_pages = int(procstat[23])
    logger.info("tracemalloc dump: tracing {} MiB ({} MiB peak), using {} MiB; rss {} MiB; dumped {}"
                .format(tracemalloc.get_traced_memory()[0] // 1048576,
                        tracemalloc.get_traced_memory()[1] // 1048576,
                        tracemalloc.get_tracemalloc_memory() // 1048576,
                        rss_pages // 256,
                        basename))
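A dump written this way can be loaded back later for offline analysis; a minimal sketch (the snapshot path below is hypothetical):

import tracemalloc

snapshot = tracemalloc.Snapshot.load("snap.12345.2024-01-01-12:00:00")
for stat in snapshot.statistics("lineno")[:10]:
    print(stat)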
Example #6
def add_tracemalloc_metrics(snapshot):
    size, max_size = tracemalloc.get_traced_memory()
    snapshot.add_metric('tracemalloc.traced.size', size, 'size')
    snapshot.add_metric('tracemalloc.traced.max_size', max_size, 'size')

    if snapshot.traces:
        snapshot.add_metric('tracemalloc.traces', len(snapshot.traces), 'int')

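    # Note: this snippet unpacks (size, free), which matches the older
    # pytracemalloc API; the stdlib tracemalloc.get_tracemalloc_memory()
    # returns a single int.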
    size, free = tracemalloc.get_tracemalloc_memory()
    snapshot.add_metric('tracemalloc.module.size', size, 'size')
    snapshot.add_metric('tracemalloc.module.free', free, 'size')
    if size:
        frag = free / size
        snapshot.add_metric('tracemalloc.module.fragmentation', frag, 'percent')
Example #7
    def schedule(self):
        task = self._task_ref()
        memory_threshold = task.get_memory_threshold()
        delay = task.get_delay()

        if memory_threshold is not None:
            traced = tracemalloc.get_traced_memory()[0]
            self.min_memory = traced - memory_threshold
            self.max_memory = traced + memory_threshold
        else:
            self.min_memory = None
            self.max_memory = None

        if delay is not None:
            self.timeout = _time_monotonic() + delay
        else:
            self.timeout = None
Example #8
    def once(self):
        delay = None

        if self.min_memory is not None:
            traced = tracemalloc.get_traced_memory()[0]
            if traced <= self.min_memory:
                return None
            if traced >= self.max_memory:
                return None
            delay = self.memory_delay

        if self.timeout is not None:
            dt = (self.timeout - _time_monotonic())
            if dt <= 0:
                return None
            if delay is not None:
                delay = min(delay, dt)
            else:
                delay = dt

        return delay
Example #9
    def compute(self):
        args = self.args

        if args.track_memory:
            if MS_WINDOWS:
                from perf._win_memory import get_peak_pagefile_usage
            else:
                from perf._memory import PeakMemoryUsageThread
                mem_thread = PeakMemoryUsageThread()
                mem_thread.start()

        if args.tracemalloc:
            import tracemalloc
            tracemalloc.start()

        WorkerTask.compute(self)

        if args.tracemalloc:
            traced_peak = tracemalloc.get_traced_memory()[1]
            tracemalloc.stop()

            if not traced_peak:
                raise RuntimeError("tracemalloc didn't trace any Python "
                                   "memory allocation")

            # drop timings, replace them with the memory peak
            self._set_memory_value(traced_peak)

        if args.track_memory:
            if MS_WINDOWS:
                mem_peak = get_peak_pagefile_usage()
            else:
                mem_thread.stop()
                mem_peak = mem_thread.peak_usage

            if not mem_peak:
                raise RuntimeError("failed to get the memory peak usage")

            # drop timings, replace them with the memory peak
            self._set_memory_value(mem_peak)
Example #10
    def pyfaidx_fasta(n):
        print('timings for pyfaidx.Fasta')
        ti = []
        tf = []
        for _ in range(n):
            t = time.time()
            f = pyfaidx.Fasta(fa_file.name)
            ti.append(time.time() - t)

            t = time.time()
            read_dict(f, headers)
            tf.append(time.time() - t)
            os.remove(index)
        # profile memory usage and report timings
        tracemalloc.start()
        f = pyfaidx.Fasta(fa_file.name)
        read_dict(f, headers)
        os.remove(index)
        print(tracemalloc.get_traced_memory())
        print(mean(ti))
        print(mean(tf)/nreads/10*1000*1000)
        tracemalloc.stop()
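The start/measure/stop pattern repeated in Examples #10-#12 and #14-#15 could be factored into a small context manager; a hypothetical sketch:

import tracemalloc
from contextlib import contextmanager

@contextmanager
def traced_memory(label=""):
    # hypothetical helper, not part of the benchmarked libraries
    tracemalloc.start()
    try:
        yield
    finally:
        current, peak = tracemalloc.get_traced_memory()
        tracemalloc.stop()
        print(label, (current, peak))

# usage: with traced_memory("pyfaidx.Fasta"): ...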
Example #11
    def pyfaidx_bgzf_faidx(n):
        print('timings for pyfaidx.Faidx with bgzf compression')
        ti = []
        tf = []
        for _ in range(n):
            t = time.time()
            f = pyfaidx.Faidx(fa_file.name + '.gz')
            ti.append(time.time() - t)

            t = time.time()
            read_faidx(f, headers)
            tf.append(time.time() - t)
            os.remove(index)
        # profile memory usage and report timings
        tracemalloc.start()
        f = pyfaidx.Faidx(fa_file.name + '.gz')
        read_faidx(f, headers)
        os.remove(index)
        print(tracemalloc.get_traced_memory())
        print(mean(ti))
        print(mean(tf)/nreads/10*1000*1000)
        tracemalloc.stop()
Example #12
    def fastahack_fetch(n):
        print('timings for fastahack.FastaHack')
        ti = []
        tf = []
        for _ in range(n):
            t = time.time()
            f = fastahack.FastaHack(fa_file.name)
            ti.append(time.time() - t)

            t = time.time()
            read_fastahack(f, headers)
            tf.append(time.time() - t)
            os.remove(index)
        # profile memory usage and report timings
        tracemalloc.start()
        f = fastahack.FastaHack(fa_file.name)
        read_fastahack(f, headers)
        os.remove(index)
        print(tracemalloc.get_traced_memory())
        print(mean(ti))
        print(mean(tf)/nreads/10*1000*1000)
        tracemalloc.stop()
Example #13
def loop(*, size, times):
    for i in range(times):
        print(len(L))
        g(size)
        print([t._format_size(x, False) for x in t.get_traced_memory()])
        snapshot = t.take_snapshot().filter_traces(
            (
                t.Filter(False, "<frozen importlib._bootstrap>"),
                t.Filter(False, "*tracemalloc*"),
                t.Filter(False, "*linecache*"),
                t.Filter(False, "*sre_*"),
                t.Filter(False, "*re.py"),
                t.Filter(False, "*fnmatch*"),
                t.Filter(False, "*tokenize*"),
                t.Filter(False, "<unknown>"),
            )
        )

        for stat in snapshot.statistics("lineno", cumulative=False)[:3]:
            print("----------------------------------------")
            print(t._format_size(stat.size, False))
            for line in stat.traceback.format():
                print(line)
        print("========================================")
Example #14
    def seqio_read(n):
        print('timings for Bio.SeqIO')
        ti = []
        tf = []
        for _ in range(n):
            t = time.time()
            fh = open(fa_file.name)
            f = SeqIO.to_dict(SeqIO.parse(fh, "fasta"))
            ti.append(time.time() - t)

            t = time.time()
            read_dict(f, headers)
            tf.append(time.time() - t)
            fh.close()
        # profile memory usage and report timings
        tracemalloc.start()
        fh = open(fa_file.name)
        f = SeqIO.to_dict(SeqIO.parse(fh, "fasta"))
        read_dict(f, headers)
        fh.close()
        print(tracemalloc.get_traced_memory())
        print(mean(ti))
        print(mean(tf)/nreads/100*1000*1000)
        tracemalloc.stop()
Example #15
    def pyfasta_fseek(n):
        print('timings for pyfasta.Fasta (fseek)')
        ti = []
        tf = []
        for _ in range(n):
            t = time.time()
            f = pyfasta.Fasta(fa_file.name, record_class=pyfasta.FastaRecord)
            ti.append(time.time() - t)

            t = time.time()
            read_dict(f, headers)
            tf.append(time.time() - t)
            os.remove(fa_file.name + '.flat')
            os.remove(fa_file.name + '.gdx')
        # profile memory usage and report timings
        tracemalloc.start()
        f = pyfasta.Fasta(fa_file.name, record_class=pyfasta.FastaRecord)
        read_dict(f, headers)
        os.remove(fa_file.name + '.flat')
        os.remove(fa_file.name + '.gdx')
        print(tracemalloc.get_traced_memory())
        print(mean(ti))
        print(mean(tf)/nreads/10*1000*1000)
        tracemalloc.stop()
Example #16
class Ride:
    def __init__(self, route, date, daytype, rides):
        self.route = route
        self.date = date
        self.daytype = daytype
        self.rides = rides


def read_rides(filename):
    '''
    Read the bus ride data as a list of tuples
    '''
    rows = []
    f = open(filename)
    f_csv = csv.reader(f)
    headings = next(f_csv)  # Skip headers
    for row in f_csv:
        route = row[0]
        date = row[1]
        daytype = row[2]
        rides = int(row[3])
        record = Ride(route, date, daytype, rides)
        rows.append(record)
    f.close()
    return rows


if __name__ == '__main__':
    import tracemalloc
    tracemalloc.start()
    rows = read_rides('Data/ctabus.csv')
    print('Memory Use: Current %d, Peak %d' % tracemalloc.get_traced_memory())
Example #17
        if char not in mapFinal.keys():
            stack.append(char)
        else:
            charPop = stack.pop()
            if charPop == mapPair[char]:
                continue
            else:
                mapFinal[char] += 1
                corrupted = True
                break

    if not corrupted:
        inv_map = {v: k for k, v in mapPair.items()}
        totalPoints = 0
        while len(stack) > 0:
            totalPoints = 5 * totalPoints + mapPoints[inv_map[stack.pop()]]

        listPoints.append(totalPoints)

else:
    print(len(listPoints))
    print(int((len(listPoints) / 2)) + 1)
    print(sorted(listPoints))
    print(sorted(listPoints)[int((len(listPoints) / 2))])

#####################################################################

print("--- %s milliseconds ---" % ((time.time() - start_time) * 1000))
print("--- %s MB peak ---" % (tracemalloc.get_traced_memory()[1] / 1000000))

tracemalloc.stop()
Example #18
def mem_count():
    if MEM_DEBUG:
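        # tracemalloc._format_size() is a private helper that pretty-prints
        # byte counts; it is not part of the documented API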
        logging.info("Memory: current: %s, peak: %s" % tuple(
            (tracemalloc._format_size(m, False)
             for m in tracemalloc.get_traced_memory())))
Example #19
 def on_epoch_end(self, last_metrics, **kwargs):
     cpu_used, cpu_peak = list(
         map(lambda x: float(x / 2**20), tracemalloc.get_traced_memory()))
     self.peak_monitor_stop()
     # The numbers are deltas in MBs (beginning of the epoch and the end)
     return add_metrics(last_metrics, [cpu_used, cpu_peak])
Example #20
        elif t > M:
            M = t
            x1 = i
            x2 = j

    return M, x1, x2


#Generate an array (here size of 50000)
a = np.random.randint(-10, 10, 50000)
a = np.array(a)
#Start tracking memory
tracemalloc.start()
#Start tracking time
begin = time.time()
max_sum_subarray_kadane = kadane_algorithm(a, len(a))
#Stop tracking time
end = time.time()
#Stop tracking memory
snapshot = tracemalloc.take_snapshot()
for stat in snapshot.statistics("lineno"):
    print("stat kadane")
    print(stat)
    print(stat.traceback.format())
#Print peak memory
print("\nTraced Memory (Current, Peak): ", tracemalloc.get_traced_memory())
#Print time spent
print("Time:")
print(end - begin)
Example #21
def blockMerge(ALLBLOCKFILE, BLOCKSTOMERGE, BLOCKPATH, spimi_index):
    print(
        "=============== Merging SPIMI blocks into final inverted index... ==============="
    )
    tracemalloc.start()
    Filewrite = open('Merge/invert_index.txt', "w+")
    iterlist = []
    term = ""
    current_term = ""
    startmerge = time.process_time()
    for BLOCKFILE in ALLBLOCKFILE:
        print("File Name:", BLOCKFILE)
        print("-- Reading into memory... ", BLOCKFILE.split(".txt", 1)[0])

        finaldict = {}

        l = open(BLOCKPATH + BLOCKFILE)
        Fileread = open('Merge/invert_index.txt')
        Initialfile = Fileread.read()
        if (Initialfile.strip()):
            lst = Initialfile.strip().split('\n')
            for i in range(len(lst)):
                val = lst[i].split(" -----------> ")
                finaldict[val[0]] = val[1]
        else:
            finaldict = {}

        iterlist = (l.read().strip().split('\n'))
        for l2 in range(len(iterlist)):
            ksplit = iterlist[l2].split(
                " -----------> "
            )  # ['aaaaaq', '[[2136, 1]]'] OR ['aam', '[[1782, 1], [1786, 1]]']
            if (finaldict.get(ksplit[0]) != None):
                postlingvalold = json.loads(finaldict.get(
                    ksplit[0]))  # [[1,4],[2,5]]
                newblock = json.loads(ksplit[1])
                for i in range(len(newblock)):
                    if newblock[i] not in postlingvalold:
                        #print("THIS IS THE NEWBLOCK MATCHING THE CONDITION : ", newblock[i])
                        postlingvalold.append(newblock[i])
                finaldict[ksplit[0]] = str(postlingvalold)
            else:
                current_term = ksplit[0]
                term = term + current_term.capitalize()
                finaldict[ksplit[0]] = ksplit[1]


        Filewrite = open('Merge/invert_index.txt', "w+")
        Filewrite1 = open('Merge/invert_actual_index.txt', "w+")
        indexwriter1 = open('Merge/index_cp_1.txt', "w+")
        indexwriter1.write(term)
        for key, value in sorted(finaldict.items()):
            Filewrite.write(key + " -----------> " + value + "\n")
            Filewrite1.write(key + " -----------> " + value + "\n")
        print("Finished merging block: ",
              BLOCKFILE.split(".txt", 1)[0], " and writing to disk")
        endmerge = time.process_time()
        eachmerge = endmerge - startmerge
        print("\n Time taken after each Block merge : ", eachmerge, "\n")
        Fileread.close()
        Filewrite.close()
        Filewrite1.close()
        indexwriter1.close()
        current, peak = tracemalloc.get_traced_memory()
        print(f" After merge : Current memory usage is {current / 10**6}MB")
    tracemalloc.stop()
Example #22
def read_rides_into_tuple(filename):
    rows = []
    f = open(filename)
    f_csv = csv.reader(f)
    headings = next(f_csv)     # Skip headers
    for row in f_csv:
        route = row[0]
        date = row[1]
        daytype = row[2]
        rides = int(row[3])
        record = (route, date, daytype, rides)
        rows.append(record)
    f.close()
    return rows
    
def read_rides_into_dict(filename): ...           # slower

def read_rides_into_namedtuple(filename): ...     # medium

def read_rides_into_class(filename): ...          # slowest

def read_rides_into_class_w_slots(filename): ...  # faster

def read_rides_via_pandas(filename): ...          # fastest
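One of the stubbed variants filled in as a hypothetical sketch, assuming the same CSV layout as read_rides_into_tuple() above:

import csv

def read_rides_into_dict(filename):
    # hypothetical body matching the stub above: one plain dict per row
    rows = []
    with open(filename) as f:
        f_csv = csv.reader(f)
        next(f_csv)                # skip headers
        for row in f_csv:
            rows.append({'route': row[0], 'date': row[1],
                         'daytype': row[2], 'rides': int(row[3])})
    return rows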

if __name__ == '__main__':
    import tracemalloc
    tracemalloc.start()
    rows = read_rides_into_tuple('Data/ctabus.csv')
    print('Memory Use: Current %d, Peak %d' % tracemalloc.get_traced_memory())
    rows = read_rides_into_dict('Data/ctabus.csv')
    print('Memory Use: Current %d, Peak %d' % tracemalloc.get_traced_memory())
Example #23
    def post_run_cell(self, result):
        logger.debug(f"post_run_cell: 1 {self.exp}")
        if not self.running: return

        self.time_delta = time.time() - self.time_start

        if self.backend != 'cpu':
            self.peak_monitoring = False

        if self.gc_collect: gc.collect()

        # instead of needing a peak memory monitoring thread, tracemalloc does
        # the job of getting newly used and peaked memory automatically, since
        # it tracks all malloc/free calls.
        cpu_mem_used_delta, cpu_mem_used_peak = tracemalloc.get_traced_memory()
        tracemalloc.stop() # reset accounting

        with self.lock:
            if self.exp is None: return
            self.cpu_mem_used_new = self.exp.cpu_ram_used()
        self.cpu_mem_used_delta   = cpu_mem_used_delta
        self.cpu_mem_peaked_delta = max(0, cpu_mem_used_peak - cpu_mem_used_delta)

        if self.backend != 'cpu':
            with self.lock:
                if self.exp is None: return
                self.gpu_mem_used_new = self.exp.gpu_ram_used()

            # delta_used is the difference between current used mem and used mem at the start
            self.gpu_mem_used_delta = self.gpu_mem_used_new - self.gpu_mem_used_prev

            # peaked_delta is the overhead if any. It is calculated as follows:
            #
            # 1. The difference between the peak memory and the used memory at the
            # start is measured:
            # 2a. If it's negative, then peaked_delta is 0
            # 2b. Otherwise, if used_delta is positive it gets subtracted from peaked_delta
            # XXX: 2a shouldn't be needed once we have a reliable peak counter
            self.gpu_mem_peaked_delta = self.gpu_mem_used_peak - self.gpu_mem_used_prev
            if self.gpu_mem_peaked_delta <= 0:
                self.gpu_mem_peaked_delta = 0
            elif self.gpu_mem_used_delta > 0:
                self.gpu_mem_peaked_delta -= self.gpu_mem_used_delta

        if self.compact:
            if 1:
                out  = f"CPU: {b2mb(self.cpu_mem_used_delta):0.0f}/{b2mb(self.cpu_mem_peaked_delta):0.0f}/{b2mb(self.cpu_mem_used_new):0.0f} MB"
            if self.backend != 'cpu':
                out += f" | GPU: {b2mb(self.gpu_mem_used_delta):0.0f}/{b2mb(self.gpu_mem_peaked_delta):0.0f}/{b2mb(self.gpu_mem_used_new):0.0f} MB"
            out += f" | Time {secs2time(self.time_delta)} | (Consumed/Peaked/Used Total)"
            print(out)
        else:
            if 1:
                vals  = [self.cpu_mem_used_delta, self.cpu_mem_peaked_delta, self.cpu_mem_used_new]
            if self.backend != 'cpu':
                vals += [self.gpu_mem_used_delta, self.gpu_mem_peaked_delta, self.gpu_mem_used_new]
            w = int2width(*map(b2mb, vals)) + 1 # some air
            if w < 10: w = 10 # accommodate header width
            pre = '・ '
            print(f"{pre}RAM: {'△Consumed':>{w}} {'△Peaked':>{w}}    {'Used Total':>{w}} | Exec time {secs2time(self.time_delta)}")
            if 1:
                print(f"{pre}CPU: {b2mb(self.cpu_mem_used_delta):{w},.0f} {b2mb(self.cpu_mem_peaked_delta):{w},.0f} {b2mb(self.cpu_mem_used_new):{w},.0f} MB |")
            if self.backend != 'cpu':
                print(f"{pre}GPU: {b2mb(self.gpu_mem_used_delta):{w},.0f} {b2mb(self.gpu_mem_peaked_delta):{w},.0f} {b2mb(self.gpu_mem_used_new):{w},.0f} MB |")

        # for self.data accessor
        self.cpu_mem_used_prev = self.cpu_mem_used_new
        if self.backend != 'cpu':
            self.gpu_mem_used_prev = self.gpu_mem_used_new

        self.data = CellLoggerData(
            CellLoggerMemory(self.cpu_mem_used_delta, self.cpu_mem_peaked_delta, self.cpu_mem_used_prev),
            CellLoggerMemory(self.gpu_mem_used_delta, self.gpu_mem_peaked_delta, self.gpu_mem_used_prev),
            CellLoggerTime(self.time_delta)
        )
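The helpers b2mb, secs2time, and int2width are not defined in this snippet; minimal hypothetical stand-ins consistent with how they are called:

import time

def b2mb(num_bytes):
    # bytes -> MiB, as the name suggests
    return num_bytes / 2**20

def secs2time(secs):
    # seconds -> "HH:MM:SS"
    return time.strftime("%H:%M:%S", time.gmtime(secs))

def int2width(*values):
    # width of the widest value when formatted with a thousands separator
    return max(len(f"{int(v):,}") for v in values)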
Example #24
class HaveSlots:
    __slots__ = ['item%s' % i for i in range(ITEM_NUM)]

    def __init__(self):
        for i in range(len(self.__slots__)):
            setattr(self, 'item%s' % i, i)


class NoSlots:
    def __init__(self):
        for i in range(ITEM_NUM):
            setattr(self, 'item%s' % i, i)


# start tracing
tracemalloc.start()

obj = [NoSlots() for i in range(100)]
# get the current size and peak size of memory blocks traced by the tracemalloc module, as a tuple: (current: int, peak: int)
print(tracemalloc.get_traced_memory())

# stop tracing
tracemalloc.stop()

# start tracing again, which amounts to a reset
tracemalloc.start()
obj2 = [HaveSlots() for i in range(100)]
print(tracemalloc.get_traced_memory())

# (21832, 22219)    # without __slots__, creating 100 objects takes about 21832 bytes
# (13760, 14147)    # with __slots__ defined, creating 100 objects takes about 13760 bytes
Example #25
def loop(*, size, times):
    for i in range(times):
        logger.info(
            "memory (current, peak) %s",
            str([t._format_size(x, False) for x in t.get_traced_memory()]))
        g(size)
Example #26
def test_function_memory_usage():
    tracemalloc.start()
    instance_list = AwsInstances()
    current, peak_memory = tracemalloc.get_traced_memory()
    tracemalloc.stop()
    assert peak_memory / 10**6 <= 40
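If per-phase peaks are needed within one test, Python 3.9+ also offers tracemalloc.reset_peak(), which zeroes the peak counter without clearing the traces; a minimal sketch (phase_one/phase_two are stand-in workloads):

import tracemalloc

def phase_one(): return [0] * 100_000   # stand-in workload
def phase_two(): return [0] * 200_000   # stand-in workload

tracemalloc.start()
phase_one()
_, peak_one = tracemalloc.get_traced_memory()
tracemalloc.reset_peak()                # Python 3.9+: reset only the peak
phase_two()
_, peak_two = tracemalloc.get_traced_memory()
tracemalloc.stop()
print(peak_one, peak_two)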
Example #27
from main import main
import timeit
import tracemalloc

t = timeit.Timer("main(\"HDWallpaperFree\", 2)", "from main import main")
print(t.timeit(1))

tracemalloc.start()
main(screen_name="HDWallpaperFree", limit=2)
print("Current: %d, Peak %d" % tracemalloc.get_traced_memory())
Example #28
def search_algo(n, maze, start, end):
    tracemalloc.start()
    start_time = time.time()
    current, peak = tracemalloc.get_traced_memory()
    print(f"Current memory usage before search {current / 10**6}MB")

    search_cost = 0
    pos = start  
    delay = 0.0
    grid, rect, screen, wid = make_screen(n)
    queue = [0]  # BFS queue (pop/push can be implemented in O(1) with two pointers)
    row = 0
    col = 0
    maze[row][col] = -1
    step_cost = 5
    moves = []
    parent = [-1]*(n*n) # stores parent of every node
    while pos != end:
        pos = queue.pop(0)
        row = pos//n
        col = pos%n
        expanded = False
        # try expanding current node and adding to the queue
        if (col + 1 < n) and (maze[row][col + 1] not in [-1,1]) :
            queue.append(row*n + col + 1)
            maze[row][col+1] = -1
            parent[queue[-1]] = pos
            expanded = True
            if queue[-1] == end:
                pos = end
        if (row + 1 < n) and (maze[row + 1][col] not in [-1,1]) :
            queue.append((row + 1)*n + col)
            maze[row+1][col] = -1
            parent[queue[-1]] = pos
            expanded = True
            if queue[-1] == end:
                pos = end
        if (col - 1 >= 0) and (maze[row][col - 1] not in [-1,1]) :
            queue.append(row*n + col - 1)
            maze[row][col-1] = -1
            parent[queue[-1]] = pos
            expanded = True
            if queue[-1] == end:
                pos = end
        if (row - 1 >= 0) and (maze[row - 1][col] not in [-1,1]) :
            queue.append((row - 1)*n + col)
            maze[row-1][col] = -1
            parent[queue[-1]] = pos
            expanded = True
            if queue[-1] == end:
                pos = end
        redraw_maze(grid, rect, screen, n, maze, pos, delay, wid, end)
        if expanded:
            search_cost+=step_cost
    curr_node = end
    # printing the path from start to end
    while parent[curr_node] != -1 :
        maze[curr_node//n][curr_node%n] = 2
        if parent[curr_node] == curr_node - 1 :
            moves.append("Right")
        elif parent[curr_node] == curr_node + 1 :
            moves.append("Left")
        elif parent[curr_node] == curr_node + n :
            moves.append("Up")
        elif parent[curr_node] == curr_node - n :
            moves.append("Down")
        curr_node = parent[curr_node]
        #redraw_maze(grid, rect, screen, n, maze, pos, delay, wid, end)
    moves = moves[::-1]
    maze[0][0] = 2
    redraw_maze(grid, rect, screen, n, maze, pos, delay, wid, end)

    end_time = time.time()
    current, peak = tracemalloc.get_traced_memory()
    print("Total Search Time : {} seconds".format(end_time-start_time))
    print(f"Peak memory usage was {peak / 10**6}MB")
    print(f"Total Expanding Search Cost is {search_cost} Units")
    print(f"Best Path Total Cost is {len(moves)*step_cost} Units")
    tracemalloc.stop()

    print(moves)
    popup_win(str(len(moves)*step_cost), "Score", "./final.png" , screen)
Example #29
import tracemalloc as t

print("*start")
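# tracing has not started yet, so both counters read 0 B here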
print([t._format_size(x, False) for x in t.get_traced_memory()])
t.start()

L = [[_ for _ in range(10000)] for i in range(100)]
print("*gen")
print([t._format_size(x, False) for x in t.get_traced_memory()])

snapshot = t.take_snapshot()
for stats in snapshot.statistics("traceback")[:3]:
    print(stats)

print("----------------------------------------")
snapshot = t.take_snapshot()
for stats in snapshot.statistics("lineno", cumulative=True)[:3]:
    print(stats)

t.stop()
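# after stop(), the counters are reset, so this again shows 0 B twice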
print([t._format_size(x, False) for x in t.get_traced_memory()])
Example #30
def run_memleak_test(bench, iterations, report):
    tracemalloc.start()

    starti = min(50, iterations // 2)
    endi = iterations

    malloc_arr = np.empty((endi,), dtype=np.int64)
    rss_arr = np.empty((endi,), dtype=np.int64)
    rss_peaks = np.empty((endi,), dtype=np.int64)
    nobjs_arr = np.empty((endi,), dtype=np.int64)
    garbage_arr = np.empty((endi,), dtype=np.int64)
    open_files_arr = np.empty((endi,), dtype=np.int64)
    rss_peak = 0

    p = psutil.Process()

    for i in range(endi):
        bench()

        gc.collect()

        rss = p.memory_info().rss
        malloc, peak = tracemalloc.get_traced_memory()
        nobjs = len(gc.get_objects())
        garbage = len(gc.garbage)
        open_files = len(p.open_files())
        print("{0: 4d}: pymalloc {1: 10d}, rss {2: 10d}, nobjs {3: 10d}, garbage {4: 4d}, files: {5: 4d}".format(
            i, malloc, rss, nobjs, garbage, open_files))

        malloc_arr[i] = malloc
        rss_arr[i] = rss
        if rss > rss_peak:
            rss_peak = rss
        rss_peaks[i] = rss_peak
        nobjs_arr[i] = nobjs
        garbage_arr[i] = garbage
        open_files_arr[i] = open_files

    print('Average memory consumed per loop: %1.4f bytes\n' %
          (np.sum(rss_peaks[starti+1:] - rss_peaks[starti:-1]) / float(endi - starti)))

    from matplotlib import pyplot as plt
    fig, (ax1, ax2, ax3) = plt.subplots(3)
    ax1b = ax1.twinx()
    ax1.plot(malloc_arr, 'r')
    ax1b.plot(rss_arr, 'b')
    ax1.set_ylabel('pymalloc', color='r')
    ax1b.set_ylabel('rss', color='b')

    ax2b = ax2.twinx()
    ax2.plot(nobjs_arr, 'r')
    ax2b.plot(garbage_arr, 'b')
    ax2.set_ylabel('total objects', color='r')
    ax2b.set_ylabel('garbage objects', color='b')

    ax3.plot(open_files_arr)
    ax3.set_ylabel('open file handles')

    if not report.endswith('.pdf'):
        report = report + '.pdf'
    fig.tight_layout()
    fig.savefig(report, format='pdf')
Example #31
def tick():
    while True:
        logger.info(
            "%s",
            str([t._format_size(x, False) for x in t.get_traced_memory()]))
        time.sleep(0.2)
Example #32
def do_benchmark(name, num_freq_bins, num_samples, dtype):
    print("###################################################")
    print("Starting benchmark '%s' (%d freq bins, %d samples, dtype=%s)" %
          (name, num_freq_bins, num_samples, dtype))
    print("###################################################")
    ###################################
    # DATASET SETUP
    ###################################
    dataset = Dataset("./", name)

    dataset.device.name = "Benchmark Data Generator"
    dataset.device.version = "1.0"

    freq_bins = np.logspace(3, 9, num_freq_bins)

    if "test" not in dataset:
        dataset.create_subset("test", freq_bins, False, dtype=dtype)

    ###################################
    # WRITE
    ###################################
    print("Start writing %d Samples with %d bins each" %
          (num_samples, num_freq_bins))
    tracemalloc.start()
    start_time = time.perf_counter()
    for i in range(num_samples):
        dataset["test"].append_sample(
            time=np.datetime64("now"),
            spectrum=np.random.random(num_freq_bins) * 30 - 80)
    stop_time = time.perf_counter()
    (current, peak) = tracemalloc.get_traced_memory()
    print("Peak memory consumption: %.3f MiB" % (peak / 1048576))
    print("Done. Took %.3f seconds" % (stop_time - start_time))
    print()
    tracemalloc.stop()

    ###################################
    # READ AND SUM
    ###################################
    print("Start reading all samples (%d) and summing up the spectrum" %
          dataset["test"].len())
    tracemalloc.start()
    start_time = time.perf_counter()
    sum = dataset["test"].spectrum[:].sum(1)
    stop_time = time.perf_counter()
    (current, peak) = tracemalloc.get_traced_memory()
    print("Peak memory consumption: %.3f MiB" % (peak / 1048576))
    print("Done. Took %.3f seconds" % (stop_time - start_time))
    print()
    tracemalloc.stop()

    ###################################
    # READ AND SUM (Chunked)
    ###################################
    print(
        "Start reading all (chunked) samples (%d) and summing up the spectrum"
        % dataset["test"].len())
    tracemalloc.start()
    start_time = time.perf_counter()

    chunk_size = 100
    for i in range(int(dataset["test"].len() / chunk_size)):
        sum = dataset["test"].spectrum[i * chunk_size:i * chunk_size +
                                       chunk_size].sum(1)

    stop_time = time.perf_counter()
    (current, peak) = tracemalloc.get_traced_memory()
    print("Peak memory consumption: %.3f MiB" % (peak / 1048576))
    print("Done. Took %.3f seconds" % (stop_time - start_time))
    print()
    tracemalloc.stop()

    dataset.close()
Example #33
def train_and_predict(
    config,
    # Functions to not import all modules
    preprocess_data_inverse,
    fitted_power_transform,
    # Other
    iterated_model_train,
    iterated_model_predict,
    iterated_model_name,
    iterated_model_index,
    optimization_index,
    optimization_value,
    model_train_input,
    model_predict_input,
    model_test_inputs,
    models_test_outputs,
    models_test_outputs_unstandardized,
    data_abs_max,
    data_mean,
    data_std,
    last_undiff_value=None,
    final_scaler=None,
    pipe=None,
    semaphor=None,
) -> None | dict[str, Any]:
    """Inner function, that can run in parallel with multiprocessing.

    Note:
        config is just a dictionary passed as param, so cannot use dot syntax here.

    Args:
        Some values from predictit configuration.

    Returns:
        None | dict[str, Any]: Return dict of results or send data via multiprocessing.
    """

    logs_list = []
    warnings_list = []

    if config["multiprocessing"]:
        mylogging._misc.filter_warnings()
        mylogging.outer_warnings_filter(config["ignored_warnings"],
                                        config["ignored_warnings_class_type"])
        mylogging.config.BLACKLIST = config["ignored_warnings"]
        mylogging.config.OUTPUT = config["logger_output"]
        mylogging.config.LEVEL = config["logger_level"]
        mylogging.config.FILTER = config["logger_filter"]
        mylogging.config.COLORIZE = config["logger_color"]
        logs_redirect = mylogging.redirect_logs_and_warnings_to_lists(
            logs_list, warnings_list)

    if config["is_tested"]:
        import mypythontools

        mypythontools.tests.setup_tests(matplotlib_test_backend=True)

    if semaphor:
        semaphor.acquire()

    if config["trace_processes_memory"]:
        import tracemalloc

        tracemalloc.start()

    model_results = {"Name": iterated_model_name}

    result_name = (f"{iterated_model_name} - {optimization_value}"
                   if config["optimization"] else f"{iterated_model_name}")

    if (config["optimizeit"] and optimization_index == 0
            and iterated_model_name in config["models_parameters_limits"]):

        start_optimization = time.time()

        try:
            model_results[
                "Best optimized parameters"] = predictit.best_params.optimize(
                    iterated_model_train,
                    iterated_model_predict,
                    config["models_parameters"].get(iterated_model_name),
                    config["models_parameters_limits"][iterated_model_name],
                    model_train_input=model_train_input,
                    model_test_inputs=model_test_inputs,
                    models_test_outputs=models_test_outputs,
                    time_limit=config["optimizeit_limit"],
                    error_criterion=config["error_criterion"],
                    name=iterated_model_name,
                    iterations=config["iterations"],
                    fragments=config["fragments"],
                    details=config["optimizeit_details"],
                    plot=config["optimizeit_plot"],
                )

        except TimeoutError:
            model_results["Best optimized parameters"] = {}
            mylogging.traceback(
                f"Hyperparameter optimization of {iterated_model_name} didn't finish"
            )

        for k, l in model_results["Best optimized parameters"].items():

            if iterated_model_name not in config["models_parameters"]:
                config["models_parameters"][iterated_model_name] = {}
            config["models_parameters"][iterated_model_name][k] = l

        stop_optimization = time.time()
        model_results[
            "Hyperparameter optimization time"] = stop_optimization - start_optimization

    start = time.time()

    try:

        # If no parameters or parameters details, add it so no index errors later
        if iterated_model_name not in config["models_parameters"]:
            config["models_parameters"][iterated_model_name] = {}

        # Train all models
        trained_model = iterated_model_train(
            model_train_input,
            **config["models_parameters"][iterated_model_name])

        # Create predictions - out of sample
        one_reality_result = iterated_model_predict(model_predict_input,
                                                    trained_model,
                                                    config["predicts"])

        if one_reality_result is None or np.isnan(np.sum(one_reality_result)):
            raise ValueError("NaN predicted from model.")

        # Remove wrong values out of scope to not be plotted
        one_reality_result[abs(one_reality_result) > 3 * data_abs_max] = np.nan

        # Do inverse data preprocessing
        if config["power_transformed"]:
            one_reality_result = fitted_power_transform(
                one_reality_result, data_std, data_mean)

        one_reality_result = preprocess_data_inverse(
            one_reality_result,
            final_scaler=final_scaler,
            last_undiff_value=last_undiff_value,
            standardizeit=config["standardizeit"],
            data_transform=config["data_transform"],
        )

        tests_results = np.zeros((config["repeatit"], config["predicts"]))
        test_errors_unstandardized = np.zeros(
            (config["repeatit"], config["predicts"]))
        test_errors = np.zeros(config["repeatit"])

        # Predict many values in test inputs to evaluate which models are best - do not inverse data preprocessing,
        # because test data are processed
        for repeat_iteration in range(config["repeatit"]):

            # Create in-sample predictions to evaluate if model is good or not
            tests_results[repeat_iteration] = iterated_model_predict(
                model_test_inputs[repeat_iteration],
                trained_model,
                predicts=config["predicts"],
            )

            if config["power_transformed"]:
                tests_results[repeat_iteration] = fitted_power_transform(
                    tests_results[repeat_iteration], data_std, data_mean)

            test_errors[
                repeat_iteration] = predictit.evaluate_predictions.compare_predicted_to_test(
                    tests_results[repeat_iteration],
                    models_test_outputs[repeat_iteration],
                    error_criterion=config["error_criterion"],
                )

            tests_results[repeat_iteration] = preprocess_data_inverse(
                tests_results[repeat_iteration],
                final_scaler=final_scaler,
                last_undiff_value=last_undiff_value,
                standardizeit=config["standardizeit"],
                data_transform=config["data_transform"],
            )

            test_errors_unstandardized[
                repeat_iteration] = predictit.evaluate_predictions.compare_predicted_to_test(
                    tests_results[repeat_iteration],
                    models_test_outputs_unstandardized[repeat_iteration],
                    error_criterion=config["error_criterion"],
                )

        model_results["Model error"] = test_errors.mean()
        model_results["Unstandardized model error"] = test_errors_unstandardized.mean()
        model_results["Results"] = one_reality_result
        model_results["Test errors"] = test_errors

        # For example tensorflow is not pickleable, so sending model from process would fail.
        # Trained models only if not multiprocessing
        if not config["multiprocessing"]:
            model_results["Trained model"] = trained_model

    except Exception:
        results_array = np.zeros(config["predicts"])
        results_array.fill(np.nan)
        test_errors = np.zeros((config["repeatit"], config["predicts"]))
        test_errors.fill(np.nan)

        model_results["Model error"] = np.inf
        model_results["Unstandardized model error"] = np.inf
        model_results["Results"] = results_array
        model_results["Test errors"] = test_errors
        error_message = (
            f"Error in '{result_name}' model"
            if not config["optimization"] else
            f"Error in {iterated_model_name} model with optimized value: {optimization_value}"
        )

        mylogging.traceback(caption=error_message)

    finally:
        model_results["Index"] = (optimization_index, iterated_model_index)
        model_results["warnings_list"] = warnings_list
        model_results["logs_list"] = logs_list
        model_results["Model time [s]"] = time.time() - start

        if config["optimization_variable"]:
            model_results["Optimization value"] = optimization_value

        if config["trace_processes_memory"]:
            _, memory_peak_MB = tracemalloc.get_traced_memory()
            model_results["Memory Peak\n[MB]"] = memory_peak_MB / 10**6
            tracemalloc.stop()

        if config["multiprocessing"]:
            logs_redirect.close_redirect()

        if semaphor:
            semaphor.release()

        if config["multiprocessing"] == "process":
            pipe.send({f"{result_name}": model_results})
            pipe.close()

        else:
            return {f"{result_name}": model_results}
Example #34
def run_memleak_test(bench, iterations, report):
    tracemalloc.start()

    starti = min(50, iterations // 2)
    endi = iterations

    malloc_arr = np.empty((endi, ), dtype=np.int64)
    rss_arr = np.empty((endi, ), dtype=np.int64)
    rss_peaks = np.empty((endi, ), dtype=np.int64)
    nobjs_arr = np.empty((endi, ), dtype=np.int64)
    garbage_arr = np.empty((endi, ), dtype=np.int64)
    open_files_arr = np.empty((endi, ), dtype=np.int64)
    rss_peak = 0

    p = psutil.Process()

    for i in range(endi):
        bench()

        gc.collect()

        rss = p.memory_info().rss
        malloc, peak = tracemalloc.get_traced_memory()
        nobjs = len(gc.get_objects())
        garbage = len(gc.garbage)
        open_files = len(p.open_files())
        print("{0: 4d}: pymalloc {1: 10d}, rss {2: 10d}, nobjs {3: 10d}, "
              "garbage {4: 4d}, files: {5: 4d}".format(i, malloc, rss, nobjs,
                                                       garbage, open_files))

        malloc_arr[i] = malloc
        rss_arr[i] = rss
        if rss > rss_peak:
            rss_peak = rss
        rss_peaks[i] = rss_peak
        nobjs_arr[i] = nobjs
        garbage_arr[i] = garbage
        open_files_arr[i] = open_files

    print('Average memory consumed per loop: {:1.4f} bytes\n'.format(
        np.sum(rss_peaks[starti + 1:] - rss_peaks[starti:-1]) /
        (endi - starti)))

    from matplotlib import pyplot as plt
    fig, (ax1, ax2, ax3) = plt.subplots(3)
    ax1b = ax1.twinx()
    ax1.plot(malloc_arr, 'r')
    ax1b.plot(rss_arr, 'b')
    ax1.set_ylabel('pymalloc', color='r')
    ax1b.set_ylabel('rss', color='b')

    ax2b = ax2.twinx()
    ax2.plot(nobjs_arr, 'r')
    ax2b.plot(garbage_arr, 'b')
    ax2.set_ylabel('total objects', color='r')
    ax2b.set_ylabel('garbage objects', color='b')

    ax3.plot(open_files_arr)
    ax3.set_ylabel('open file handles')

    if not report.endswith('.pdf'):
        report = report + '.pdf'
    fig.tight_layout()
    fig.savefig(report, format='pdf')
Example #35
    marked_states_factors = [
        get_factors_from_state(state, tb) for state in marked_states
    ]

    tracemalloc.start()

    qstate = grover_candecomp(marked_states,
                              backend=backend,
                              rank_threshold=rank_threshold,
                              compress_ratio=compress_ratio,
                              cp_tol=cp_tol,
                              cp_maxiter=cp_maxiter,
                              cp_inneriter=cp_inneriter,
                              num_als_init=num_als_init,
                              init_als=init_als,
                              mode=mode,
                              debug=debug)

    current_memory, peak_memory = tracemalloc.get_traced_memory()
    tracemalloc.stop()
    print(f'current_memory is {current_memory / (1024 * 1024)} MB')
    print(f'peak_memory is {peak_memory / (1024 * 1024)} MB')

    overall_item_fidelity = 0.
    for i in range(num_marked_states):
        item_fidelity = candecomp.fidelity(marked_states_factors[i],
                                           qstate.factors, tb)**2
        print(f"Fidelity for {i} is {item_fidelity}")
        overall_item_fidelity += item_fidelity
    print(f"Overall item fidelity is {overall_item_fidelity}")
    print(f"Fidelity average is {qstate.fidelity_avg ** 2}")
Example #36
    n_outputs = len(set([row[-1] for row in train]))
    network = initialize_network(n_inputs, n_hidden, n_outputs)
    train_network(network, train, l_rate, n_epoch, n_outputs)
    predictions = list()
    for row in test:
        prediction = predict(network, row)
        predictions.append(prediction)
    return (predictions)


# Test Backprop on Seeds dataset

seed(1)
memoryuse = 0
tracemalloc.start()
current1, peak1 = tracemalloc.get_traced_memory()
# load and prepare data
filename = 'seeds_dataset.csv'
dataset = load_csv(filename)
for i in range(len(dataset[0]) - 1):
    str_column_to_float(dataset, i)
# convert class column to integers
str_column_to_int(dataset, len(dataset[0]) - 1)
# normalize input variables
minmax = dataset_minmax(dataset)
normalize_dataset(dataset, minmax)
# evaluate algorithm
n_folds = 5
l_rate = 0.3
# n_epoch = 10
# n_hidden = 3
Example #37
    def loop(self):
        if self.cntr % 60 == 0:
            tracker = messaging.MsgTrack()
            stats_obj = stats.APRSDStats()
            pl = packets.PacketList()
            thread_list = APRSDThreadList()
            now = datetime.datetime.now()
            last_email = stats_obj.email_thread_time
            if last_email:
                email_thread_time = utils.strfdelta(now - last_email)
            else:
                email_thread_time = "N/A"

            last_msg_time = utils.strfdelta(now - stats_obj.aprsis_keepalive)

            current, peak = tracemalloc.get_traced_memory()
            stats_obj.set_memory(current)
            stats_obj.set_memory_peak(peak)

            try:
                login = self.config["aprs"]["login"]
            except KeyError:
                login = self.config["ham"]["callsign"]

            keepalive = (
                "{} - Uptime {} RX:{} TX:{} Tracker:{} Msgs TX:{} RX:{} "
                "Last:{} Email: {} - RAM Current:{} Peak:{} Threads:{}"
            ).format(
                login,
                utils.strfdelta(stats_obj.uptime),
                pl.total_recv,
                pl.total_tx,
                len(tracker),
                stats_obj.msgs_tx,
                stats_obj.msgs_rx,
                last_msg_time,
                email_thread_time,
                utils.human_size(current),
                utils.human_size(peak),
                len(thread_list),
            )
            LOG.info(keepalive)

            # See if we should reset the aprs-is client
            # Due to losing a keepalive from them
            delta_dict = utils.parse_delta_str(last_msg_time)
            delta = datetime.timedelta(**delta_dict)

            if delta > self.max_delta:
                #  We haven't gotten a keepalive from aprs-is in a while
                # reset the connection.
                if not client.KISSClient.is_enabled(self.config):
                    LOG.warning("Resetting connection to APRS-IS.")
                    client.factory.create().reset()

            # Check version every hour
            delta = now - self.checker_time
            if delta > datetime.timedelta(hours=1):
                self.checker_time = now
                level, msg = utils._check_version()
                if level:
                    LOG.warning(msg)
        self.cntr += 1
        time.sleep(1)
        return True
Example #38
if __name__ == '__main__':
    import glb
    import objgraph  
    import gc,tracemalloc
    tracemalloc.start()
    b_snapshot = tracemalloc.take_snapshot()
    for i in range(10):
        with glb.db_connect() as conn:
            with conn.cursor(as_dict=True) as cursor:
                cursor.execute("SELECT * FROM zhanbao_tbl WHERE  id=4274 order by id asc")
                row = cursor.fetchone()
                while row:
                    b1_snapshot = tracemalloc.take_snapshot()
                    try:
                        print('worker_no:'+ row['worker_no']+"\t"+ str(row['id']) +"     "+ str(tracemalloc.get_traced_memory()))
                        files_template_exec(row['id'],json.loads(row['config_txt']),row['worker_no'],glb.config['UPLOAD_FOLDER'] ,wx_queue=glb.msg_queue)  
                        print("====================================")
                        print('worker_no:'+ row['worker_no']+"\t"+ str(row['id']) +"     "+ str(tracemalloc.get_traced_memory()))
                        print("====================================")
                        snapshot2 = tracemalloc.take_snapshot()
                        top_stats = snapshot2.compare_to(b1_snapshot, 'lineno')
                        for stat in top_stats[:10]:
                            print(stat)
                        print("====================================")
                    except Exception as e:
                        print(e)
                    row = cursor.fetchone()
        gc.collect() 
        objgraph.show_most_common_types(limit=5)   
    ### print the types with the most object instances
Example #39
def launch_model(images,
                 labels,
                 adam_coef=0.01,
                 t_size=0.2,
                 batch_s=100,
                 nb_epoches=10):
    print(
        '------------------------------------------------------------------------------------------------\n'
    )
    print(
        '{0}\nAdam coef: {4}\tTrain size: {5}%\tPercentage of test data:{1}\tBatch size: {2}\tNb epoches:{3}'
        .format(os.path.basename(__file__), t_size, batch_s, nb_epoches,
                adam_coef, t_size))
    # Split dataset =  % training dataset 20% test_dataset
    X_train, X_test, y_train, y_test = train_test_split(images,
                                                        labels,
                                                        test_size=t_size,
                                                        random_state=11)

    # Transform training and test datasets into PyTorch dataset
    ## Tensors
    tensor_x_train = torch.Tensor(X_train)
    tensor_y_train = torch.Tensor(y_train)
    tensor_x_test = torch.Tensor(X_test)
    tensor_y_test = torch.Tensor(y_test)

    ## Convert labels float type into long type (labels need to be type long)
    tensor_y_train = tensor_y_train.long()
    tensor_y_test = tensor_y_test.long()

    ## Create TensorDataset
    tensorDataset_train = TensorDataset(tensor_x_train, tensor_y_train)
    tensorDataset_test = TensorDataset(tensor_x_test, tensor_y_test)

    ## Create dataloaders
    train_loader = DataLoader(tensorDataset_train,
                              batch_size=batch_s)  # batch_s samples / batch
    test_loader = DataLoader(tensorDataset_test, batch_size=batch_s)

    # Start timer and save memory capacity
    start = time.time()
    tracemalloc.start()

    # Init model
    network = cnn.CNN()
    optimizer = optim.Adam(network.parameters(), lr=adam_coef)

    # Launch epoches
    for epoch in range(nb_epoches):
        total_loss = 0
        total_correct = 0

        for batch in train_loader:  # Get batch
            images, labels = batch
            preds = network(images)  # Pass Batch
            loss = F.cross_entropy(preds, labels)  # Calculate Loss

            # Update hyperparameters
            optimizer.zero_grad()
            loss.backward()  # Calculate Gradients
            optimizer.step()  # Update Weights

            # Save loss and number of good prediction / batch
            total_loss += loss.item()
            total_correct += get_num_correct(preds, labels)
        print("Epoch:", epoch, "Total_correct:", total_correct, "Loss:",
              total_loss)

    # Calculate accurancy for test dataset
    with torch.no_grad():
        test_preds = get_all_preds(network, test_loader)
    all_labels = tensor_y_test
    preds_correct = get_num_correct(test_preds, all_labels)
    print('Total correct:{0}/{1}'.format(preds_correct, len(y_test)))
    accuracy = preds_correct * 100 / len(y_test)
    timer = time.time() - start
    current, peak = tracemalloc.get_traced_memory()
    diff = peak - current
    print('Accuracy: {0} %'.format(accuracy))
    print(
        '------------------------------------------------------------------------------------------------\n'
    )
    return network, {
        'Epoches': nb_epoches,
        'Batchs': batch_s,
        'Accuracies': float("{:.2f}".format(accuracy)),
        'Test_size': t_size,
        'Adam_coef': adam_coef,
        'Timer': float("{:.4f}".format(timer)),
        'Mem_current': current,
        'Mem_peak': peak,
        'Mem_diff': diff
    }
Example #40
print(len(word))
obj.insert(word)
print(obj.search(word))
prefix1 = 'asdfsddsvrtgbdidddddytjnhgtfgghdddddrfeferdhtyjretyjnddsdfsdfidjdksdndvlndslfwoiefhowfcdslmnvkjsdbfkjsdbfjsdhfiuwefgquywesdfsdfliwopfihwlfcnsdvccjbvhsdfjhsfiasdfsddsvrtgbdidddddytjnhgtfgghdddddrfeferdhtyjretyjnddsdfsdfidjdksdndvlndslfwoiefhowfcdslmnvkjsdbfkjsdbfjsdhfiuwefgquywesdfsdfliwopfihwlfcnsdvccjbvhsdfjhsfiqoqwjpowqpqoiwjdjksbjdnbcnmxbvhfdgvgjghasgfsagdstxfzxgcvgquyrwfeutyqwreuywqtduhsvajvzbxbxbxbhshsdfgtyeuyriwpasdksdklfnsdmvncxmvnfjdgviuefasdfsddsvrtgbdidddddytjnhgtfgghdddddrfeferdhtyjretyjnddsdfsdfidjdksdndvlndslfwoiefhowfcdslmnvkjsdbfkjsdbfjsdhfiuwefgquywesdfsdfliwopfihwlfcnsdvccjbvhsdfjhsfiqoqwjpowqpqoiwjdjksbjdnbcnmxbvhfdgvgjghasgfsagdstxfzxgcvgquyrwfeutyqwreuywqtduhsvajvzbxbxbxbhshsdfgtyeuyriwpasdksdklfnsdmvncxmvnfjdgviuefasdfsddsvrtgbdidddddytjnhgtfgghdddddrfeferdhtyjretyjnddsdfsdfidjdksdndvlndslfwoiefhowfcdslmnvkjsdbfkjsdbfjsdhfiuwefgquywesdfsdfliwopfihwlfcnsdvccjbvhsdfjhsfiqoqwjpowqpqoiwjdjksbjdnbcnmxbvhfdgvgjghasgfsagdstxfzxgcvgquyrwf'
print(obj.startsWith(prefix1))
prefix2 = 'asdfsddsvrtgbdidddddytjnhgtfgghdddddrfeferdhtyjretyjnddsdfsdfidjdksdndvlndslfwoiefh'
print(obj.startsWith(prefix2))
prefix3 = 'asdfsddsvrtgbdidddddytjnhgtfgghdddddrfeferdhtyjretyjnddsdfsdfidjdksdndvlndslfwoiefhowfcdslmnvkjsdbfkjsdbfjsdhfiuwefgquywesdfsdfliwopfihwlfcnsdvccjbvhsdfjhsfiasdfsddsvrtgbdidddddytjnhgtfgghdddddrfeferdhtyjretyjnddsdfsdfidjdksdndvlndslfwoiefhowfcdslmnvkjsdbfkjsdbfjsdhfiuwefgquywesdfsdfliwopfihwlfcnsdvccjbvhsdfjhsfiqoqwjpowqpqoiwjdjksbjdnbcnmxbvhfdgvgjghasgfsagdstxfzxgcvgquyrwfeutyqwreuywqtduhsvajvzbxbxbxbhshsdfgtyeuyriwpasdksdklfnsdmvncxmvnfjdgviuefasdfsddsvrtgbdidddddytjnhgtfgghdddddrfeferdhtyjretyjnddsdfsdfidjdksdndvlndslfwoiefhowfcdslmnvkjsdbfkjsdbfjsdhfiuwefgquywesdfsdfliwopfihwlfcnsdvccjbvhsdfjhsfiqoqwjpowqpqoiwjdjksbjdnbcnmxbvhfdgvgjghasgfsagdstxfzxgcvgquyrwfeutyqwreuywqtduhsvajvzbxbxbxbhshsdfgtyeuyriwpasdksdklfnsdmvncxmvnfjdgviuefasdfsddsvrtgbdidddddytjnhgtfgghdddddrfeferdhtyjretyjnddsdfsdfidjdksdndvlndslfwoiefhowfcdslmnvkjsdbfkjsdbfjsdhfiuwefgquywesdfsdfliwopfihwlfcnsdvccjbvhsdfjhsfiqoqwjpowqpqoiwjdjksbjdnbcnmxbvhfdgvgjghasgfsagdstxfzxgcvgquyrwfeutyqwreuywqtduhsvajvzbxbxbxbhshsdfgtyeuyriwpasdksdklfnsdmvncxmvnfjdgviuefasdfsddsvrtgbdidddddytjnhgtfgghdddddrfeferdhtyjretyjnddsdfsdfidjdksdndvlndslfwoiefhowfcdslmnvkjsdbfkjsdbfjsdhfiuwefgquywesdfsdfliwopfihwlfcnsdvccjbvhsdfjhsfiqoqwjpowqpqasdfsddsvrtgbdidddddytjnhgtfgghdddddrfeferdhtyjretyjnddsdfsdfidjdksdndvlndslfwoiefhowfcdslmnvkjsdbfkjsdbfjsdhfiuwefgquywesdfsdfliwopfihwlfcnsdvccjbvhsdfjhsfiqoqwjpowqpqoiwjdjksbjdnbcnmxbvhfdgvasdfsddsvrtgbdidddddytjnhgtfgghdddddrfeferdhtyjretyjnddsdfsdfidjdksdndvlndslfwoiefhowfcdslmnvkjsdbfkjsdbfjsdhfiuwefgquywesdfsdfliwopfihwlfcnsdvccjbvhsdfjhsfiqoqghasgfsagdstxfzxgcvgquyrwfeutyqwreuywqtduhsvajvzbxgtyeuyriwpasdksdklfnsdmvncxmvnjdgfsagdstxfzxgcvgquyrwfeutyqwreuywqtduhsvajvzbxbxbxbhshsdfgtyeuyriwpasdksdklfnsdmvncxmvnfjdgviuefoiwjdjksbjdnbcnmxbvhfdgvgjghasgfsagdstxfzxgcvgquyrwfeutyqwreuywqtduhsvajvzbxbxbxbhshsdfgtyeuyriwpasdksdklfnsdmvncxmvnfjdgviuefqoqwjpowqpqoiwjdjksbjdnbcnmxbvhfdgvgjghasgfsagdstxfzxgcvgquyrwfeutyqwreuywqtduhsvajvzbxbxbxbhshsdfgtyeuyriwpasdksdklfnsdmvncxmvnfjdgviue'
print(obj.startsWith(prefix3))
word2 = 'jnhgtfgghdddddrfeferdhtyjretyjnddsdfsdfiddsdfsdfsdfsdfiddsdfsdfidjdksdndvlndslfwoiefhowfcslmnvdslfwoiefhowfcslmnvkjsdhowfcslmnvkjsdbfkjcslmnvkjsdbfkjsdbfjsdhfiuwefgquyvrgquyvrtgbdidddddytjnhgtfgghdddddrfeferdhtyjretyjnddsdfsdfidjdksdndvlndslfwoiefhowfcdslmnvkjsdbfkjsdbfjsdhfiuwefgquywesdfsdfliwopfihwlfcnsdvccjbvhsdfjhsfiqoqwjpowqpqoiwjdjksbjdnbcnmxbvhfdgvgjghasgfsagdstxfzxgcvgquyrwfeutyqwreuywqtduhsvajvzbxbxbxbhshsdfgtyeuyriwpasdksdklfnsdgbdidddddytjnhgtfgghdddddrfeferdhtyjretyjnddsdfsdfidjdksdndvlndslfwoiefhowfcdslmnvkjsdbfkjsdbfjsdhfiuwefgquywesdfsdfliwopfihwlfcnsdvccjbvhsdfjhsfiqoqwjpowqpqoiwjdjksbjdnbcnmxbvhtxfzxgcvgquyrwfeutyqwreuywqtduhsvajvzbxbxbxbhshsdfgtyeuyriwpasdksdklfnsdmvncxmvnfjdgviuefasdfsddsvrtgbdidddddytjnhgtfgghdddddrfeferdhtyjretyjnddsdfsdfidjdksdndvlndslfwoiefhowfcdslmnvkjsdbfkjsdbfjsdhfiuwefgquywesdfsdfliwopfihwlfcnsdvccjbvhsdfjhsfiqoqwjpowqpqoiwjdjksbjdnbcnmxbvhfdgvgjghasqwreuywqtduhsvajvzbxbxbxbhshsdfgtyeuyriwpasdksdklfnsdmvncxmvnfjdgviuefasdfsddsvrtgbdidddddytjnhgtfgghdddddrfeferdhtyjretyjnddsdfsdfidjdksdndvlndslfwoiefhowfcdslmnvkjsdbfkjsdbfjsdhfiuwefgquywesdfsdfliwopfihwlfcnsdvccjbvhsdfjhsfiqoqwjpowqpqasdfsddsvrtgbdidddddytjnhgdndvlndslfwoiefhowfcdslmnvkjsdbfkjsdbfjsdhfiuwefgquywesdfsdfliwopfihwlfcnsdvccjbvhsdfjhsfiqoqwjpowqpqoiwjdjksbjdnbcnmxbvhfdgvasdfsddsvrtgbdidddddytjnhgtfgghdddddrfeferdhtyjretyjnddsdfsdfidjdksdndvlndslfwoiefhowfcdslmnvkjsdbfkjsdbfjsdhfiuwefgquywesdfsdfliwopfihwlfcnsdvccjbvhsdfjhsfiqoqghasgfsagdstxfzxgcvgquyrwfeutyqwreuywdksdklfnsdmvncxmvnjdgfsagdstxfzxgcvgquyrwfeutyqwreuywqtduhsvajvzbxbxbxbhshsdfgtyeuyriwpasdksdklfnsdmvncxmvnfjdgviuefoiwjdjksbjdnbcnmxbvhfdgvgjghasgfsagdstxfzxgcvgquyrwfeutyqwreuywqtduhsvajvzbxbxbxbhshsdfgtyeuyriwpasdksdklfnsdmvncxmvnfjdgviuefqoqwjpowqpqoiwjdjksbjdnbcnmxbvhfdgvgjghasgfsagdstxfzxgcvgquyrwfeutyqwreuywqtduhsvajvzbxbxbxbhshsdfgtyeuyriwpasdksdklfnsdmvncxmvnfjdgviuef'
print(len(word2))
obj.insert(word2)
word3 = 'dewoidhdslknforhgferiubgvkjfbvkasdjdbfkjasdfjksdbfkjsabhfuiweihfiuqwegfihbvhsbvmnxcbncnndhjdhxcycywioqwiskkdfbnsdkjhfreifgiwreuhfdsjbvckcjbvkjsdfkujshfiuwegfiweufedjihfdskjfnsdkjbfviuerhbvyrefbvifdhbsdkjfbvksdubfiweufhiewuhasdjkcaskjdqiuqwiqwouwiudhwiudbsutcdstytdtcrsfytcrsadtycfasdgcvsd'
print(len(word3))
obj.insert(word3)
prefix4 = 'dewoidhdslknforhgferiubgvkjfbvkasdjdbfkjasdfjksdbfkjsabhfuiweihfiuqwegfihbvhsbvmnxcbncnndhjdhxcycywioqwiskkdfbnsdkjhfreifgiwreuhfdsjbvckcjbvkjsdfkujshfiuwegfiweufedjihfdskjfnsdkjbfviuerhbvyrefbvifdhbsdkjfbvksdubfiweufhiewuhasdjkcaskjdqiuqwiqwouwiudhwiudbsutcdstytdtcrsfytcrsadtycfasdcxvdsfdv'
print(obj.startsWith(prefix4))
prefix5 = 'dewoidhdslknforhgferiubgvkjfbvkasdjdbfkjasdfjksdbfkjsabhfuiweihfiuqwegfihbvhsbvmnxcbncnndhjdhxcycywioqwiskkdfbnsdkjhfreifgiwreuhfdsjbvckcjbvkjsdfkujshfiuwegfiweufedjihfdskjfnsdkjbfviuerhbvyrefbvifdhbsdkjfbvksdubfiweufhiewuhasdjkcaskjdqiuqwiqwouwiudhwiudbsutcdstytdtcrsfytcrsadtycfasd'
print(obj.startsWith(prefix5))
current, peak = tracemalloc.get_traced_memory()
print(
    f"Current memory usage is {current / 10**6}MB; Peak was {peak / 10**6}MB")

snapshot = tracemalloc.take_snapshot()
top_stats = snapshot.statistics('traceback')
""" print("[ Top 10 ]")
for stat in top_stats[:10]:
    print('메모리 블록이 할당된 곳:', stat.traceback, '/ 메모리 블록 수(int):', stat.count, '/ 총 메모리 블록의 바이트 단위 크기 (int):', stat.size) """
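# display_top() is assumed to be the "top N lines" printing helper from the tracemalloc docs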
display_top(snapshot)
tracemalloc.stop()
Example #41
0
import logging, time, tracemalloc as t  # t aliases tracemalloc; _format_size() is its private size formatter

logger = logging.getLogger(__name__)

def tick():
    while True:
        logger.info("%s", str([t._format_size(x, False) for x in t.get_traced_memory()]))
        time.sleep(0.2)
Example #42
0
# ------------------------------------------------------------
# Part 4: Thinking in functions
# ------------------------------------------------------------
import tracemalloc

tracemalloc.start()

import readrides

rows = readrides.read_as_instances('ctabus.csv')

# 1. How many bus routes exist?
#    Solution: Read into a set

routes = {row.route for row in rows}
print(len(routes), 'routes')

# 2. How many people rode route 22 on 9-Apr-2007?
#    Solution: Build an index mapping routes, dates to ride totals.
#    You'll use (route,date) as a composite key.

by_route_and_date = {(row.route, row.date): row.rides for row in rows}
print('Route 22 on 9-Apr-2007:', by_route_and_date['22', '04/09/2007'])

# 3. Find out what day the route 22 bus had the highest ridership

rt22 = [(row.rides, row.date) for row in rows if row.route == '22']
print('Max ridership:', max(rt22))

print('Current %s, Peak %s' % tracemalloc.get_traced_memory())
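The readrides module above is not shown; a minimal stand-in for read_as_instances, assuming ctabus.csv has a header row and the columns route, date, daytype, rides (an assumption, since the file is not part of this snippet):

import csv
from collections import namedtuple

Row = namedtuple('Row', ['route', 'date', 'daytype', 'rides'])

def read_as_instances(filename):
    # Parse each CSV record into a lightweight Row instance.
    with open(filename) as f:
        rows = csv.reader(f)
        next(rows)  # skip the header row
        return [Row(route, date, daytype, int(rides))
                for route, date, daytype, rides in rows]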
Example #43
0
 def dump_memory(what):
     print("***** %s *****" % what)
     size, peak_size = tracemalloc.get_traced_memory()
     print("traced: %.1f KB (peak: %.1f KB)" % (size / 1024., peak_size / 1024.))
Example #44
0
os.chdir(script_path)

#
n = "\n"
print(
#    unvivtool.__doc__, n,
    unvivtool, n,
    dir(unvivtool), n,

    flush=True
)


# tracemalloc -- BEGIN ---------------------------------------------------------
# tracemalloc.stop()
first_size, first_peak = tracemalloc.get_traced_memory()

if sys.version_info[0:2] >= (3, 9):
    tracemalloc.reset_peak()

tracemalloc.start()
# tracemalloc -- END -----------------------------------------------------------


count_successful_tests = 0
print("")

# Decode
if args.cmd[0] == "d":
    vivfile = "tests/car.viv"
    outdir = "tests"
Example #45
0
def main():
    config = ConfigParser.ConfigParser()
    my_path = Path(__file__).parent.parent
    ini_path = os.path.join(my_path, 'config', 'server.ini')
    config.read(ini_path)
    YAML_DIR = config['SERVICE']['yaml_directory']
    METRIC_YAML = config['SERVICE']['metrics_yaml']
    METRIC_YML_PATH = os.path.join(my_path, YAML_DIR, METRIC_YAML)
    SPDX_URL = config['EXTERNAL']['spdx_license_github']
    DATACITE_API_REPO = config['EXTERNAL']['datacite_api_repo']
    RE3DATA_API = config['EXTERNAL']['re3data_api']
    METADATACATALOG_API = config['EXTERNAL']['metadata_catalog']
    isDebug = config.getboolean('SERVICE', 'debug_mode')
    data_files_limit = int(config['SERVICE']['data_files_limit'])
    metric_specification = config['SERVICE']['metric_specification']

    preproc = Preprocessor()
    preproc.retrieve_metrics_yaml(METRIC_YML_PATH, data_files_limit,
                                  metric_specification)
    print('Total metrics defined: {}'.format(preproc.get_total_metrics()))

    preproc.retrieve_licenses(SPDX_URL, isDebug)
    preproc.retrieve_datacite_re3repos(RE3DATA_API, DATACITE_API_REPO, isDebug)
    preproc.retrieve_metadata_standards(METADATACATALOG_API, isDebug)
    preproc.retrieve_science_file_formats(isDebug)
    preproc.retrieve_long_term_file_formats(isDebug)

    print('Total SPDX licenses : {}'.format(preproc.get_total_licenses()))
    print('Total re3repositories found from datacite api : {}'.format(
        len(preproc.getRE3repositories())))
    print('Total subjects area of imported metadata standards : {}'.format(
        len(preproc.metadata_standards)))
    start = False
    usedatacite = True
    tracemalloc.start()
    n = 1
    for identifier in testpids:

        print(identifier)
        print(n)
        n += 1
        if identifier == startpid or not startpid:
            start = True
        if start:
            ft = FAIRCheck(uid=identifier,
                           test_debug=debug,
                           metadata_service_url=metadata_service_endpoint,
                           metadata_service_type=metadata_service_type,
                           use_datacite=usedatacite)

            #ft = FAIRCheck(uid=identifier,  test_debug=True, use_datacite=usedatacite)

            uid_result, pid_result = ft.check_unique_persistent()
            ft.retrieve_metadata_embedded(ft.extruct_result)
            include_embedded = True
            if ft.repeat_pid_check:
                uid_result, pid_result = ft.check_unique_persistent()
            ft.retrieve_metadata_external()

            core_metadata_result = ft.check_minimal_metatadata()
            content_identifier_included_result = ft.check_content_identifier_included(
            )
            access_level_result = ft.check_data_access_level()
            license_result = ft.check_license()
            relatedresources_result = ft.check_relatedresources()
            check_searchable_result = ft.check_searchable()
            data_content_metadata = ft.check_data_content_metadata()
            data_file_format_result = ft.check_data_file_format()
            community_standards_result = ft.check_community_metadatastandards()
            data_provenance_result = ft.check_data_provenance()
            formal_representation_result = ft.check_formal_metadata()
            semantic_vocabulary_result = ft.check_semantic_vocabulary()
            metadata_preserved_result = ft.check_metadata_preservation()
            standard_protocol_metadata_result = ft.check_standardised_protocol_metadata(
            )
            standard_protocol_data_result = ft.check_standardised_protocol_data(
            )

            results = [
                uid_result, pid_result, core_metadata_result,
                content_identifier_included_result, check_searchable_result,
                access_level_result, formal_representation_result,
                semantic_vocabulary_result, license_result,
                data_file_format_result, data_provenance_result,
                relatedresources_result, community_standards_result,
                data_content_metadata, metadata_preserved_result,
                standard_protocol_data_result,
                standard_protocol_metadata_result
            ]
            #results=[core_metadata_result,uid_result, pid_result]
            #print(ft.metadata_merged)
            debug_messages = ft.get_log_messages_dict()
            ft.logger_message_stream.flush()
            ft.get_assessment_summary(results)
            for res_k, res_v in enumerate(results):
                if ft.isDebug:
                    debug_list = debug_messages.get(res_v['metric_identifier'])
                    #debug_list= ft.msg_filter.getMessage(res_v['metric_identifier'])
                    if debug_list is not None:
                        results[res_k]['test_debug'] = debug_messages.get(
                            res_v['metric_identifier'])
                    else:
                        results[res_k]['test_debug'] = [
                            'INFO: No debug messages received'
                        ]
                else:
                    results[res_k]['test_debug'] = ['INFO: Debugging disabled']
                    debug_messages = {}
            print(json.dumps(results, indent=4, sort_keys=True))
            #remove unused logger handlers and filters to avoid memory leaks
            ft.logger.handlers = [ft.logger.handlers[-1]]
            #ft.logger.filters = [ft.logger.filters]
            current, peak = tracemalloc.get_traced_memory()
            print(
                f"Current memory usage is {current / 10 ** 6}MB; Peak was {peak / 10 ** 6}MB"
            )
            snapshot = tracemalloc.take_snapshot()
            top_stats = snapshot.statistics('traceback')

            # pick the biggest memory block
            stat = top_stats[0]
            print("%s memory blocks: %.1f KiB" %
                  (stat.count, stat.size / 1024))
            for line in stat.traceback.format():
                print(line)

            for i, stat in enumerate(snapshot.statistics('filename')[:5], 1):
                print(i, str(stat))

            #preproc.logger.
            gc.collect()
    tracemalloc.stop()
Example #46
0
 def func():
     size, max_size = tracemalloc.get_traced_memory()
     print("Traced memory: %.1f MB (max: %.1f MB)"
           % (size / 1024.0 ** 2, max_size / 1024.0 ** 2))
Example #47
0
    def _worker(self, bench, sample_func):
        args = self.args
        loops = args.loops
        metadata = dict(self.metadata)
        start_time = perf.monotonic_clock()

        calibrate = (not loops)
        if calibrate:
            loops, calibrate_warmups = self._calibrate(bench, sample_func)
        else:
            if perf.python_has_jit():
                # With a JIT, continue to calibrate during warmup
                calibrate = True
            calibrate_warmups = None

        if args.track_memory:
            if MS_WINDOWS:
                from perf._win_memory import get_peak_pagefile_usage
            else:
                from perf._memory import PeakMemoryUsageThread
                mem_thread = PeakMemoryUsageThread()
                mem_thread.start()

        if args.tracemalloc:
            import tracemalloc
            tracemalloc.start()

        if args.warmups:
            loops, warmups = self._run_bench(bench, sample_func, loops,
                                             args.warmups,
                                             is_warmup=True, calibrate=calibrate)
        else:
            warmups = []
        if calibrate_warmups:
            warmups = calibrate_warmups + warmups
        loops, samples = self._run_bench(bench, sample_func, loops,
                                         args.samples)

        if args.tracemalloc:
            traced_peak = tracemalloc.get_traced_memory()[1]
            tracemalloc.stop()

            if not traced_peak:
                raise RuntimeError("tracemalloc didn't trace any Python "
                                   "memory allocation")

            # drop timings, replace them with the memory peak
            metadata['unit'] = 'byte'
            warmups = None
            samples = (float(traced_peak),)

        if args.track_memory:
            if MS_WINDOWS:
                mem_peak = get_peak_pagefile_usage()
            else:
                mem_thread.stop()
                mem_peak = mem_thread.peak_usage

            if not mem_peak:
                raise RuntimeError("failed to get the memory peak usage")

            # drop timings, replace them with the memory peak
            metadata['unit'] = 'byte'
            warmups = None
            samples = (float(mem_peak),)

        duration = perf.monotonic_clock() - start_time
        metadata['duration'] = duration
        metadata['name'] = self.name
        metadata['loops'] = loops
        if self.inner_loops is not None and self.inner_loops != 1:
            metadata['inner_loops'] = self.inner_loops

        run = perf.Run(samples, warmups=warmups, metadata=metadata)
        bench.add_run(run)
        self._display_result(bench, check_unstable=False)

        # Save loops into args
        args.loops = loops
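When args.tracemalloc is set, the worker discards its timing samples and keeps a single sample: the traced peak in bytes. The core pattern in isolation, stripped of the perf machinery (a sketch, not the library's API):

import tracemalloc

def measure_peak(func, *args):
    # Run func once and return the peak traced memory in bytes.
    tracemalloc.start()
    try:
        func(*args)
        return float(tracemalloc.get_traced_memory()[1])
    finally:
        tracemalloc.stop()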
Example #48
0
def test_reproject_3D_memory():

    pytest.importorskip('reproject')

    tracemalloc.start()

    snap1 = tracemalloc.take_snapshot()

    # create a 64 MB cube
    cube,_ = utilities.generate_gaussian_cube(shape=[200,200,200])
    sz = _.dtype.itemsize

    # check that cube is loaded into memory
    snap2 = tracemalloc.take_snapshot()
    diff = snap2.compare_to(snap1, 'lineno')
    diffvals = np.array([dd.size_diff for dd in diff])
    # at this point, the generated cube should still exist in memory
    assert diffvals.max()*u.B >= 200**3*sz*u.B

    wcs_in = cube.wcs
    wcs_out = wcs_in.deepcopy()
    wcs_out.wcs.ctype = ['GLON-SIN', 'GLAT-SIN', cube.wcs.wcs.ctype[2]]
    wcs_out.wcs.crval = [0.001, 0.001, cube.wcs.wcs.crval[2]]
    wcs_out.wcs.crpix = [2., 2., cube.wcs.wcs.crpix[2]]

    header_out = (wcs_out.to_header())
    header_out['NAXIS'] = 3
    header_out['NAXIS1'] = int(cube.shape[2]/2)
    header_out['NAXIS2'] = int(cube.shape[1]/2)
    header_out['NAXIS3'] = cube.shape[0]

    # First the unfilled reprojection test: new memory is allocated for
    # `result`, but nowhere else
    result = cube.reproject(header_out, filled=False)

    snap3 = tracemalloc.take_snapshot()
    diff = snap3.compare_to(snap2, 'lineno')
    diffvals = np.array([dd.size_diff for dd in diff])
    # result should have the same size as the input data, except smaller in two dims
    # make sure that's all that's allocated
    assert diffvals.max()*u.B >= 200*100**2*sz*u.B
    assert diffvals.max()*u.B < 200*110**2*sz*u.B

    # without masking the cube, nothing should change
    result = cube.reproject(header_out, filled=True)

    snap4 = tracemalloc.take_snapshot()
    diff = snap4.compare_to(snap3, 'lineno')
    diffvals = np.array([dd.size_diff for dd in diff])
    assert diffvals.max()*u.B <= 1*u.MB

    assert result.wcs.wcs.crval[0] == 0.001
    assert result.wcs.wcs.crpix[0] == 2.


    # masking the cube will force the fill to create a new in-memory copy
    mcube = cube.with_mask(cube > 0.1*cube.unit)
    # `_is_huge` would trigger a use_memmap
    assert not mcube._is_huge
    assert mcube.mask.any()

    # take a new snapshot because we're not testing the mask creation
    snap5 = tracemalloc.take_snapshot()
    tracemalloc.stop()
    tracemalloc.start() # stop/start so we can check peak mem use from here
    current_b4, peak_b4 = tracemalloc.get_traced_memory()
    result = mcube.reproject(header_out, filled=True)
    current_aftr, peak_aftr = tracemalloc.get_traced_memory()


    snap6 = tracemalloc.take_snapshot()
    diff = snap6.compare_to(snap5, 'lineno')
    diffvals = np.array([dd.size_diff for dd in diff])
    # a duplicate of the cube should have been created by filling masked vals
    # (this should be near-exact since 'result' should occupy exactly the
    # same amount of memory)
    assert diffvals.max()*u.B <= 1*u.MB #>= 200**3*sz*u.B
    # the peak memory usage *during* reprojection will have that duplicate,
    # but the memory gets cleaned up afterward
    assert (peak_aftr-peak_b4)*u.B >= (200**3*sz*u.B + 200*100**2*sz*u.B)

    assert result.wcs.wcs.crval[0] == 0.001
    assert result.wcs.wcs.crpix[0] == 2.
Example #49
0
 def log_func():
     nonlocal diff
     size, max_size = tracemalloc.get_traced_memory()
     diff = (size - old_size)
Example #50
0
 def printMem(self):
     current, peak = tracemalloc.get_traced_memory()
     print(f"Current memory usage is {current / 10**6}MB; Peak was {peak / 10**6}MB")
     return
Example #51
0
def handle_traceback(sig, frame):
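    # t is assumed to be the tracemalloc module; _format_size() is its private size formatter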
    logger.info(
        "memory (current, peak) %s", str([t._format_size(x, False) for x in t.get_traced_memory()])
    )
    import traceback
    traceback.print_stack(limit=5)
Example #52
0
def read_write(plot=False):
    # mesh = generate_tetrahedral_mesh()
    mesh = generate_triangular_mesh()
    print(mesh)
    mem_size = mesh.points.nbytes + mesh.cells[0].data.nbytes
    mem_size /= 1024.0 ** 2
    print(f"mem_size: {mem_size:.2f} MB")

    formats = {
        "Abaqus": (meshio.abaqus.write, meshio.abaqus.read, ["out.inp"]),
        "Ansys (ASCII)": (
            lambda f, m: meshio.ansys.write(f, m, binary=False),
            meshio.ansys.read,
            ["out.ans"],
        ),
        # "Ansys (binary)": (
        #     lambda f, m: meshio.ansys.write(f, m, binary=True),
        #     meshio.ansys.read,
        #     ["out.ans"],
        # ),
        "AVS-UCD": (meshio.avsucd.write, meshio.avsucd.read, ["out.ucd"]),
        # "CGNS": (meshio.cgns.write, meshio.cgns.read, ["out.cgns"]),
        "Dolfin-XML": (meshio.dolfin.write, meshio.dolfin.read, ["out.xml"]),
        "Exodus": (meshio.exodus.write, meshio.exodus.read, ["out.e"]),
        # "FLAC3D": (meshio.flac3d.write, meshio.flac3d.read, ["out.f3grid"]),
        "Gmsh 4.1 (ASCII)": (
            lambda f, m: meshio.gmsh.write(f, m, binary=False),
            meshio.gmsh.read,
            ["out.msh"],
        ),
        "Gmsh 4.1 (binary)": (
            lambda f, m: meshio.gmsh.write(f, m, binary=True),
            meshio.gmsh.read,
            ["out.msh"],
        ),
        "MDPA": (meshio.mdpa.write, meshio.mdpa.read, ["out.mdpa"]),
        "MED": (meshio.med.write, meshio.med.read, ["out.med"]),
        "Medit": (meshio.medit.write, meshio.medit.read, ["out.mesh"]),
        "MOAB": (meshio.h5m.write, meshio.h5m.read, ["out.h5m"]),
        "Nastran": (meshio.nastran.write, meshio.nastran.read, ["out.bdf"]),
        "OBJ": (meshio.obj.write, meshio.obj.read, ["out.obj"]),
        "OFF": (meshio.off.write, meshio.off.read, ["out.off"]),
        "Permas": (meshio.permas.write, meshio.permas.read, ["out.dato"]),
        "PLY (binary)": (
            lambda f, m: meshio.ply.write(f, m, binary=True),
            meshio.ply.read,
            ["out.ply"],
        ),
        "PLY (ASCII)": (
            lambda f, m: meshio.ply.write(f, m, binary=False),
            meshio.ply.read,
            ["out.ply"],
        ),
        "STL (binary)": (
            lambda f, m: meshio.stl.write(f, m, binary=True),
            meshio.stl.read,
            ["out.stl"],
        ),
        "STL (ASCII)": (
            lambda f, m: meshio.stl.write(f, m, binary=False),
            meshio.stl.read,
            ["out.stl"],
        ),
        # "TetGen": (meshio.tetgen.write, meshio.tetgen.read, ["out.node", "out.ele"],),
        "VTK (binary)": (
            lambda f, m: meshio.vtk.write(f, m, binary=True),
            meshio.vtk.read,
            ["out.vtk"],
        ),
        "VTK (ASCII)": (
            lambda f, m: meshio.vtk.write(f, m, binary=False),
            meshio.vtk.read,
            ["out.vtk"],
        ),
        "VTU (binary, uncompressed)": (
            lambda f, m: meshio.vtu.write(f, m, binary=True, compression=None),
            meshio.vtu.read,
            ["out.vtu"],
        ),
        "VTU (binary, zlib)": (
            lambda f, m: meshio.vtu.write(f, m, binary=True, compression="zlib"),
            meshio.vtu.read,
            ["out.vtu"],
        ),
        "VTU (binary, LZMA)": (
            lambda f, m: meshio.vtu.write(f, m, binary=True, compression="lzma"),
            meshio.vtu.read,
            ["out.vtu"],
        ),
        "VTU (ASCII)": (
            lambda f, m: meshio.vtu.write(f, m, binary=False),
            meshio.vtu.read,
            ["out.vtu"],
        ),
        "Wavefront .obj": (meshio.obj.write, meshio.obj.read, ["out.obj"]),
        # "wkt": ".wkt",
        "XDMF (binary)": (
            lambda f, m: meshio.xdmf.write(f, m, data_format="Binary"),
            meshio.xdmf.read,
            ["out.xdmf", "out0.bin", "out1.bin"],
        ),
        "XDMF (HDF, GZIP)": (
            lambda f, m: meshio.xdmf.write(f, m, data_format="HDF", compression="gzip"),
            meshio.xdmf.read,
            ["out.xdmf", "out.h5"],
        ),
        "XDMF (HDF, uncompressed)": (
            lambda f, m: meshio.xdmf.write(f, m, data_format="HDF", compression=None),
            meshio.xdmf.read,
            ["out.xdmf", "out.h5"],
        ),
        "XDMF (XML)": (
            lambda f, m: meshio.xdmf.write(f, m, data_format="XML"),
            meshio.xdmf.read,
            ["out.xdmf"],
        ),
    }

    # formats = {
    #     # "VTK (ASCII)": formats["VTK (ASCII)"],
    #     # "VTK (binary)": formats["VTK (binary)"],
    #     # "VTU (ASCII)": formats["VTU (ASCII)"],
    #     # "VTU (binary)": formats["VTU (binary)"],
    #     # "Gmsh 4.1 (binary)": formats["Gmsh 4.1 (binary)"],
    #     # "FLAC3D": formats["FLAC3D"],
    #     "MDPA": formats["MDPA"],
    # }

    # max_key_length = max(len(key) for key in formats)

    elapsed_write = []
    elapsed_read = []
    file_sizes = []
    peak_memory_write = []
    peak_memory_read = []

    print()
    print(
        "format                      "
        + "write (s)    "
        + "read(s)      "
        + "file size    "
        + "write mem    "
        + "read mem "
    )
    print()
    with tempfile.TemporaryDirectory() as directory:
        directory = pathlib.Path(directory)
        for name, (writer, reader, filenames) in formats.items():
            filename = directory / filenames[0]

            tracemalloc.start()
            t = time.time()
            writer(filename, mesh)
            # snapshot = tracemalloc.take_snapshot()
            elapsed_write.append(time.time() - t)
            peak_memory_write.append(tracemalloc.get_traced_memory()[1])
            tracemalloc.stop()

            file_sizes.append(sum(os.stat(directory / f).st_size for f in filenames))

            tracemalloc.start()
            t = time.time()
            reader(filename)
            elapsed_read.append(time.time() - t)
            peak_memory_read.append(tracemalloc.get_traced_memory()[1])
            tracemalloc.stop()
            print(
                "{:<26}  {:e} {:e} {:e} {:e} {:e}".format(
                    name,
                    elapsed_write[-1],
                    elapsed_read[-1],
                    file_sizes[-1] / 1024.0 ** 2,
                    peak_memory_write[-1] / 1024.0 ** 2,
                    peak_memory_read[-1] / 1024.0 ** 2,
                )
            )

    names = list(formats.keys())
    # convert to MB
    file_sizes = np.array(file_sizes)
    file_sizes = file_sizes / 1024.0 ** 2
    peak_memory_write = np.array(peak_memory_write)
    peak_memory_write = peak_memory_write / 1024.0 ** 2
    peak_memory_read = np.array(peak_memory_read)
    peak_memory_read = peak_memory_read / 1024.0 ** 2

    if plot:
        plot_speed(names, elapsed_write, elapsed_read)
        plot_file_sizes(names, file_sizes, mem_size)
        plot_memory_usage(names, peak_memory_write, peak_memory_read, mem_size)
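Each write and read above is bracketed by its own tracemalloc.start()/stop() pair, so the two peaks do not contaminate each other. A context manager makes that pattern reusable (a sketch under the same assumptions, not part of meshio):

import contextlib
import tracemalloc

@contextlib.contextmanager
def traced_peak(results):
    # Record the peak traced memory (in bytes) of the with-block into results.
    tracemalloc.start()
    try:
        yield
    finally:
        results.append(tracemalloc.get_traced_memory()[1])
        tracemalloc.stop()

With this, the write measurement could read: with traced_peak(peak_memory_write): writer(filename, mesh).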
Example #53
0
def main():
    tracemalloc.start()
    print(createDict(50000))
    current, peak = tracemalloc.get_traced_memory()
    print(f"Current memory usage is {current / 10**6}MB; Peak was {peak / 10**6}MB")
    tracemalloc.stop()
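createDict() used above is not shown; a hypothetical stand-in that builds an n-entry dict, just to make the example self-contained:

def createDict(n):
    # hypothetical helper: map each integer key to its string form
    return {i: str(i) for i in range(n)}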
Example #54
0
def compute(polygon,
            polygon_id,
            s_id,
            t_id,
            s,
            t,
            algorithm_list,
            run_timeout=3600,
            max_time=5,
            min_runs=5,
            max_runs=20):
    """Compute the benchmark for one start/end pair in one polygon with all known algorithms."""
    from socket import gethostname
    from datetime import datetime
    from gsp import makestep_shortest_path, delaunay_shortest_path, lee_preparata_shortest_path, \
        trapezoid_shortest_path
    import gc
    import tracemalloc
    import sys
    import traceback
    from time import process_time as timer

    m_run = model.Run.create(
        polygon_id=polygon_id,
        host=model.Host.create_or_get(name=gethostname())[0],
        start=datetime.now(),
        end=datetime.now(),
        s=model.PolygonPoint.get(id=s_id),
        t=model.PolygonPoint.get(id=t_id),
        version=model.Version.get())
    logging.debug('Created run "%s"', m_run)

    for algorithm, sp in dict(
            delaunay=delaunay_shortest_path,
            makestep=makestep_shortest_path,
            trapezoid=trapezoid_shortest_path,
            lee_preparata=lee_preparata_shortest_path).items():
        if algorithm not in algorithm_list:
            continue

        m_algorithm, _ = model.Algorithm.create_or_get(name=algorithm)
        logging.info('Running algorithm "%s"', m_algorithm)
        gc.collect()
        tracemalloc.start()
        try:
            signal.alarm(run_timeout)
            path = list(sp(polygon, s, t))
            signal.alarm(0)
        except BaseException:
            traceback.print_exc(file=sys.stderr)
            # stop tracing so the next algorithm's measurement starts clean
            tracemalloc.stop()
            continue
        else:
            memory = tracemalloc.get_traced_memory()
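            # memory[0] is the current traced size, memory[1] the peak reached
            # while the algorithm ran; their difference is stored below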

            gc.collect()
            tracemalloc.stop()

            m_instance = model.Instance.create(run=m_run,
                                               algorithm=m_algorithm,
                                               memory=memory[1] - memory[0],
                                               path_length=len(path))
            logging.debug('Saved instance "%s"', m_instance)

            logging.debug('Creating resulting path: "%s"', path)
            for i, point in enumerate(path):
                try:
                    m_point = model.Point.get(x=point.x, y=point.y)
                except model.Point.DoesNotExist:
                    for tmp_polygon_point in m_run.polygon.polygon_points:
                        if tmp_polygon_point.point.as_geometry() == point:
                            m_polygon_point = tmp_polygon_point
                            break
                else:
                    m_polygon_point = model.PolygonPoint.get(
                        point=m_point,
                        polygon_id=polygon_id,
                        is_vertex=isinstance(point, PolygonPoint))
                model.PathPoint.create(instance=m_instance,
                                       index=i,
                                       polygon_point=m_polygon_point)

            for property, value in sp.properties.items():
                m_property, _ = model.PropertyName.create_or_get(name=property)
                if isinstance(value, int):
                    model.IntegerProperty.create(instance=m_instance,
                                                 name=m_property,
                                                 value=value)

        total_time = 0
        runs = 0

        times = []

        while runs < min_runs or (total_time < max_time and runs < max_runs):
            try:
                signal.alarm(run_timeout)
                gc.disable()
                start = timer()
                list(sp(polygon, s, t))
                time = timer() - start
                gc.enable()
                signal.alarm(0)
            except BaseException:
                traceback.print_exc(file=sys.stderr)
                break
            else:
                times.append(time)
                total_time += time
                runs += 1

        if len(times) > 0:
            with model.db.atomic():
                model.Time.insert_many(
                    dict(instance=m_instance, time=t)
                    for t in times).execute()

            m_instance.median_time = median(times)
            m_instance.save()

    m_run.end = datetime.now()
    m_run.save()

    return (polygon_id, s_id, t_id)
Example #55
0
 def measure(self):
     import tracemalloc
     current, peak = tracemalloc.get_traced_memory()
     snap = tracemalloc.take_snapshot()
     return current, snap
Example #56
0
    if Search == "DFS":
        path = Depth_First_Search(InitNode)
    elif Search == "BFS":
        path = Breadth_First_Search(InitNode)
    elif Search == "GBS":
        path = Greedy_Best_Search(InitNode)
    else:
        path = A_Star_Search(InitNode)
    
    # if a solution was found
    if path is not False and Goal(path.state):
        # PathPrint(InitNode.state, path.moveSet)
        File.write("S," + str(nodesSearched) + ",")  # solved (S), nodes searched
    else:
        File.write("F," + str(nodesSearched) + ",")  # failed (F), nodes searched

    curr_mem, max_mem = tracemalloc.get_traced_memory()
    # save execution time, current memory and peak memory usage
    File.write(str(time.process_time()) + "," + str(2*curr_mem / 10**6) + "MB," + str(2*max_mem / 10**6) + "MB\n")
    File.close()
    print("DONE w/" + Search + "," + str(sys.argv[1]) + "x" + str(sys.argv[1]) + "\n\n")
    
    # InitNode = Node()
    
    # # initializing pegboard 
    # InitNode.state = ExperimentState(9)

    # # # letting global size variables
    # cMax = len(InitNode.state[0])
    # rMax = len(InitNode.state)

    # path = A_Star_Search(InitNode)
Example #57
0
#!/usr/bin/env python
"""
exercise2_1.py
"""

import tracemalloc
f = open('Data/ctabus.csv')
tracemalloc.start()
data = f.read()
d = len(data)
print(d)
current, peak = tracemalloc.get_traced_memory()
print(current)
print(peak)
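The raw byte counts print as bare integers; scaling them is a one-liner (illustrative):

print(f"Current {current / 2**20:.1f} MiB; Peak {peak / 2**20:.1f} MiB")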