示例#1
0
def test(Input):
    """Time CC.checkAccept(Input) for the monolithic and the parallel
    composition of the six enforcers RE1/RE2/S1/S2/CS1/CS2 and return
    both runtimes in milliseconds (construction time excluded).
    """
    # --- monolithic composition ---
    monolithic = monolithic_enforcer(
        'CC', RE1e(), RE2e(), S1e(), S2e(), CS1e(), CS2e())
    mono_start = time.time()
    accept = monolithic.checkAccept(Input)
    mono_end = time.time()

    if SIZEOF:
        print(asizeof.asized(monolithic, detail=1).format())

    # --- parallel composition (component enforcers built as pDFAs) ---
    parallel = parallel_enforcer(
        RE1e("pDFA"), RE2e("pDFA"), S1e("pDFA"),
        S2e("pDFA"), CS1e("pDFA"), CS2e("pDFA"))
    par_start = time.time()
    accept = parallel.checkAccept(Input)
    par_end = time.time()

    if SIZEOF:
        print(asizeof.asized(parallel, detail=1).format())

    return (mono_end - mono_start) * 1000, (par_end - par_start) * 1000
示例#2
0
    def test_globals(self):
        '''Test globals examples'''
        # Exercise asizeof/asizesof/asized on the module globals.
        opt = '-glob[als]'
        self._printf('%sasizeof(%s, limit=%s, code=%s) ... %s',
                     os.linesep, 'globals()', 'MAX', False, opt)
        asizeof.asizeof(globals(), limit=self.MAX, code=False, stats=1)
        self._print_functions(globals(), 'globals()', opt=opt)

        self._printf('%sasizesof(%s, limit=%s, code=%s) ... %s',
                     os.linesep, 'globals(), locals()', 'MAX', False, opt)
        asizeof.asizesof(globals(), locals(), limit=self.MAX, code=False,
                         stats=1)
        asizeof.asized(globals(), align=0, detail=self.MAX, limit=self.MAX,
                       code=False, stats=1)
示例#3
0
    def test_globals(self):
        '''Test globals examples'''
        # Header line, then deep-size the module globals; code=False skips
        # sizing of code/function objects, stats=1 requests a printed summary.
        self._printf('%sasizeof(%s, limit=%s, code=%s) ... %s', os.linesep, 'globals()', 'MAX', False, '-glob[als]')
        asizeof.asizeof(globals(), limit=self.MAX, code=False, stats=1)
        self._print_functions(globals(), 'globals()', opt='-glob[als]')

        # Same exercise for asizesof (multiple objects at once) and asized
        # (detailed per-reference breakdown).
        self._printf('%sasizesof(%s, limit=%s, code=%s) ... %s', os.linesep, 'globals(), locals()', 'MAX', False, '-glob[als]')
        asizeof.asizesof(globals(), locals(), limit=self.MAX, code=False, stats=1)
        asizeof.asized(globals(), align=0, detail=self.MAX, limit=self.MAX, code=False, stats=1)
 def test_asized(self):
     '''Test asizeof.asized()
     '''
     # Calling asized() with no positional objects yields an empty result.
     self.assertEqual(list(asizeof.asized(detail=2)), [])
     # Unknown keyword options raise KeyError.
     self.assertRaises(KeyError, asizeof.asized, **{'all': True})
     sized = asizeof.asized(Foo(42), detail=2)
     # FIX: self.assert_ is a deprecated unittest alias (removed in
     # Python 3.12); assertTrue is the canonical spelling.
     self.assertTrue("Foo" in sized.name, sized.name)
     refs = [ref for ref in sized.refs if ref.name == '__dict__']
     self.assertEqual(len(refs), 1)
     refs = [ref for ref in refs[0].refs if ref.name == '[V] data: 42']
     self.assertEqual(len(refs), 1, refs)
     i = 42
     # The '[V] data: 42' ref must size exactly like the int 42 itself.
     self.assertEqual(refs[0].size, asizeof.asizeof(i), refs[0].size)
示例#5
0
def calculate_model_parts_size(data_from_training, model_id, deep=7):
    """Calculates size of model trainer and records result json.

    Deep-sizes the trainer of model `model_id` with pympler's asized
    (reference depth `deep`), converts the Asized tree into a plain nested
    dict and stores it on the model.  `data_from_training` is accepted for
    interface compatibility but not used here.
    """
    from pympler.asizeof import asized
    init_logger('trainmodel_log', obj=int(model_id))
    logging.info('Starting calculation size of model parts')

    model = Model.query.get(model_id)
    trainer = model.get_trainer()

    def walk_asized(asz):
        """Prepares output dict from Asized object"""
        res = {"name": asz.name, "size": asz.size, "properties": []}
        for r in asz.refs:
            res["properties"].append(
                {"name": r.name, "size": r.size,
                 "properties": [walk_asized(k) for k in r.refs]})
        return res

    try:
        result = walk_asized(asized(trainer, detail=deep))
        result["name"] = "trainer"
        model.model_parts_size = result
        model.save()
    except Exception as e:
        # BUG FIX: `e.message` does not exist on Python 3 exceptions and
        # raised AttributeError inside this handler; use str(e) instead.
        logging.error("Exception while calculating model parts size: {0} "
                      " \n {1}".format(str(e), get_task_traceback(e)))
    logging.info('Finished calculation')
    def __memory_info(self):
        """Build a nested size report for self._node: the total deep size,
        the six largest attributes, and each attribute's six largest
        members.  Sizes come from pympler's asizeof.asized."""
        # Get all memory info and get details with 20 depth
        size_obj = asizeof.asized(self._node, detail=20)
        whole_size = size_obj.size
        # Descend into the node's __dict__ to enumerate its attributes.
        size_obj = next(r for r in size_obj.refs if r.name == '__dict__')
        size_dict = dict()
        # Sort in descending order to select most 'heavy' collections
        for num, sub_obj in enumerate(sorted(size_obj.refs, key=lambda v: v.size, reverse=True)):
            if num > 5:
                break  # keep only the six largest attributes
            size_dict[sub_obj.name] = dict()
            size_dict[sub_obj.name]['size'] = sub_obj.size

            # Check if this object (which include __dict__ and __class__) or iterable (dict, list, etc ..)
            if len(sub_obj.refs) <= 2 and any(r.name == '__dict__' for r in sub_obj.refs):
                sub_obj_ref = next(r for r in sub_obj.refs if r.name == '__dict__')
            else:
                sub_obj_ref = sub_obj

            # NOTE: `num` is reused by this inner loop (harmless -- the outer
            # enumerate reassigns it each iteration); it also stops after six.
            for num, sub_sub_obj in enumerate(sorted(sub_obj_ref.refs, key=lambda v: v.size, reverse=True)):
                if num > 5:
                    break
                size_dict[sub_obj.name][sub_sub_obj.name] = sub_sub_obj.size

        result_dict = {"whole_node_size": whole_size}
        result_dict.update(size_dict)
        return {
            'Memory': result_dict
        }
示例#7
0
def print_memory_profiles(sm, tr, tr_sm, LOGFILE=None):
    '''
    Prints report on memory profiles

    IN:
        sm - SeriesModel - SeriesModel object for this run
        tr - SummaryTracker - SummaryTracker object for the whole run
        tr_sm - ClassTrackers - ClassTracker object of SeriesModel
        LOGFILE - file obj - Open logfile for print output
    OUT: None
    '''
    from contextlib import redirect_stdout  # stdlib; tees tracker output

    ptf('\nSERIESMODEL profiling', LOGFILE)
    ptf('Look at size of seriesmodel object', LOGFILE)
    ptf(asizeof.asizeof(sm), LOGFILE)
    ptf(asizeof.asized(sm, detail=1).format(), LOGFILE)

    ptf('Look at how the SeriesModel class is doing', LOGFILE)
    tr_sm.create_snapshot()
    tr_sm.stats.print_summary()
    # BUG FIX: `tr_sm.stats.print_summary() >> LOGFILE` was a Python 2
    # print-chevron idiom misapplied to a method call -- print_summary()
    # writes to stdout and returns None, so `None >> LOGFILE` raised
    # TypeError.  Redirect stdout into the logfile instead.
    if LOGFILE:
        with redirect_stdout(LOGFILE):
            tr_sm.stats.print_summary()

    ptf('PROFILING', LOGFILE)
    ptf('Look at memory leaks up to this point', LOGFILE)
    # Same fix for `tr.print_diff() >> LOGFILE`.
    if LOGFILE:
        with redirect_stdout(LOGFILE):
            tr.print_diff()
    tr.print_diff()
示例#8
0
 def _print_functions(self,
                      obj,
                      name=None,
                      align=8,
                      detail=MAX,
                      code=False,
                      limit=MAX,
                      opt='',
                      **unused):
     # Print the result of each public asizeof helper for `obj`:
     # basicsize/itemsize/leng/refs/flatsize plus a full asized() report.
     # `unused` absorbs extra keyword options passed through by callers.
     if name:
         self._printf('%sasizeof functions for %s ... %s', os.linesep, name,
                      opt)
     self._printf('%s(): %s', ' basicsize', asizeof.basicsize(obj))
     self._printf('%s(): %s', ' itemsize', asizeof.itemsize(obj))
     self._printf('%s(): %r', ' leng', asizeof.leng(obj))
     self._printf('%s(): %s', ' refs', _repr(asizeof.refs(obj)))
     self._printf('%s(): %s', ' flatsize',
                  asizeof.flatsize(obj, align=align))  # , code=code
     self._printf(
         '%s(): %s', ' asized',
         asizeof.asized(obj,
                        align=align,
                        detail=detail,
                        code=code,
                        limit=limit))
示例#9
0
    def __memory_info(self):
        """Report the deep size of self._node: the total, its six heaviest
        attributes, and each attribute's six heaviest members."""
        # Size the whole node with a deep (detail=20) reference breakdown.
        node_sized = asizeof.asized(self._node, detail=20)
        total = node_sized.size
        attrs = next(r for r in node_sized.refs if r.name == '__dict__')

        breakdown = dict()
        # Walk the six heaviest attributes, largest first.
        for attr in sorted(attrs.refs, key=lambda v: v.size, reverse=True)[:6]:
            breakdown[attr.name] = {'size': attr.size}

            # Plain objects expose state via a '__dict__' ref (plus
            # '__class__'); containers list their members directly.
            if len(attr.refs) <= 2 and any(r.name == '__dict__'
                                           for r in attr.refs):
                members = next(r for r in attr.refs if r.name == '__dict__')
            else:
                members = attr

            # Record the six heaviest members of this attribute.
            for member in sorted(members.refs, key=lambda v: v.size,
                                 reverse=True)[:6]:
                breakdown[attr.name][member.name] = member.size

        result = {"whole_node_size": total}
        result.update(breakdown)
        return {'Memory': result}
示例#10
0
def print_memory_profiles(sm, tr, tr_sm, LOGFILE = None):
    '''
    Prints report on memory profiles

    IN:
        sm - SeriesModel - SeriesModel object for this run
        tr - SummaryTracker - SummaryTracker object for the whole run
        tr_sm - ClassTrackers - ClassTracker object of SeriesModel
        LOGFILE - file obj - Open logfile for print output
    OUT: None
    '''
    from contextlib import redirect_stdout  # stdlib; tees tracker output

    ptf( '\nSERIESMODEL profiling', LOGFILE)
    ptf( 'Look at size of seriesmodel object', LOGFILE)
    ptf( asizeof.asizeof(sm), LOGFILE)
    ptf( asizeof.asized(sm, detail=1).format(), LOGFILE)

    ptf( 'Look at how the SeriesModel class is doing', LOGFILE)
    tr_sm.create_snapshot()
    tr_sm.stats.print_summary()
    # BUG FIX: `print_summary() >> LOGFILE` was a Python 2 print-chevron
    # idiom misapplied to a method call -- print_summary() returns None,
    # so `None >> LOGFILE` raised TypeError.  Redirect stdout instead.
    if LOGFILE:
        with redirect_stdout(LOGFILE):
            tr_sm.stats.print_summary()

    ptf( 'PROFILING', LOGFILE)
    ptf( 'Look at memory leaks up to this point', LOGFILE)
    # Same fix for `tr.print_diff() >> LOGFILE`.
    if LOGFILE:
        with redirect_stdout(LOGFILE):
            tr.print_diff()
    tr.print_diff()
示例#11
0
 def test_asized_format(self):
     '''Test Asized.format(depth=x)
     '''
     foo = Foo(42)
     shallow = asizeof.asized(foo, detail=1)
     deep = asizeof.asized(foo, detail=2)
     fmt = '%(name)s'
     shallow_default = shallow.format(fmt, order_by='name')
     shallow_depth1 = shallow.format(fmt, depth=1, order_by='name')
     shallow_depth2 = shallow.format(fmt, depth=2, order_by='name')
     deep_depth1 = deep.format(fmt, depth=1, order_by='name')
     deep_depth2 = deep.format(fmt, depth=2, order_by='name')
     # detail=1 only collected one level of refs, so raising the render
     # depth cannot show any more.
     self.assertEqual(shallow_default, "Foo\n    __class__\n    __dict__")
     self.assertEqual(shallow_default, shallow_depth1)
     self.assertEqual(shallow_default, shallow_depth2)
     self.assertEqual(shallow_depth1, deep_depth1)
     # detail=2 rendered at depth=2 exposes the second level.
     self.assertNotEqual(deep_depth1, deep_depth2)
示例#12
0
 def test_asized_format(self):
     '''Test Asized.format(depth=x)
     '''
     foo = Foo(42)
     # detail=1 collects one level of refs, detail=2 collects two.
     sized1 = asizeof.asized(foo, detail=1)
     sized2 = asizeof.asized(foo, detail=2)
     sized1_no = sized1.format('%(name)s', order_by='name')
     sized1_d1 = sized1.format('%(name)s', depth=1, order_by='name')
     sized1_d2 = sized1.format('%(name)s', depth=2, order_by='name')
     sized2_d1 = sized2.format('%(name)s', depth=1, order_by='name')
     sized2_d2 = sized2.format('%(name)s', depth=2, order_by='name')
     # depth cannot render refs that were never collected, so every
     # detail=1 rendering is identical ...
     self.assertEqual(sized1_no, "Foo\n    __class__\n    __dict__")
     self.assertEqual(sized1_no, sized1_d1)
     self.assertEqual(sized1_no, sized1_d2)
     self.assertEqual(sized1_d1, sized2_d1)
     # ... but detail=2 rendered at depth=2 shows the second level.
     self.assertNotEqual(sized2_d1, sized2_d2)
def test4(Input):
    """Time R.checkAccept(Input) under four compositions of EM10/EM11/EM12
    (monolithic, serial, maximal-prefix-parallel, parallel) and return the
    four runtimes in milliseconds.

    Timers start only after each composed enforcer is constructed, so
    construction cost is excluded.  FIX: the original assigned each
    `ts* = time.time()` twice -- once before construction and again after;
    the first store was dead and has been removed.
    """
    # Monolithic Test
    RS, RT, RU = EM10(), EM11(), EM12()
    R = monolithic_enforcer('R', RS, RT, RU)
    tsP = time.time()
    accept = R.checkAccept(Input)
    teP = time.time()

    if (SIZEOF):
        print(asizeof.asized(R, detail=1).format())

    # Serial Composition Test
    RS, RT, RU = EM10("DFA"), EM11("DFA"), EM12("DFA")
    R = serial_composition_enforcer(RS, RT, RU)
    tsS = time.time()
    accept = R.checkAccept(Input)
    teS = time.time()

    if (SIZEOF):
        print(asizeof.asized(R, detail=1).format())

    # Maximal Prefix Parallel Composition Test
    RS, RT, RU = EM10("pDFA"), EM11("pDFA"), EM12("pDFA")
    R = maximal_prefix_parallel_enforcer(RS, RT, RU)
    tsM = time.time()
    accept = R.checkAccept(Input)
    teM = time.time()

    if (SIZEOF):
        print(asizeof.asized(R, detail=1).format())

    # Parallel Composition Test
    RS, RT, RU = EM10("pDFA"), EM11("pDFA"), EM12("pDFA")
    R = parallel_enforcer(RS, RT, RU)
    tsC = time.time()
    accept = R.checkAccept(Input)
    teC = time.time()

    if (SIZEOF):
        print(asizeof.asized(R, detail=1).format())

    return (teP - tsP) * 1000, (teS - tsS) * 1000, (teM - tsM) * 1000, (
        teC - tsC) * 1000
def test5(Input):
    """Time RC.checkAccept(Input) under four compositions of EM13/EM14/EM15
    (monolithic, serial, maximal-prefix-parallel, parallel) and return the
    four runtimes in milliseconds.

    Timers start only after each composed enforcer is constructed, so
    construction cost is excluded.  FIX: the original assigned each
    `ts* = time.time()` twice -- the first store was dead and is removed.
    """
    # Monolithic Test
    RCS, RCT, RCU = EM13(), EM14(), EM15()
    RC = monolithic_enforcer('RC', RCS, RCT, RCU)
    tsP = time.time()
    accept = RC.checkAccept(Input)
    teP = time.time()

    if (SIZEOF):
        print(asizeof.asized(RC, detail=1).format())

    # Serial Composition Test
    RCS, RCT, RCU = EM13("DFA"), EM14("DFA"), EM15("DFA")
    RC = serial_composition_enforcer(RCS, RCT, RCU)
    tsS = time.time()
    accept = RC.checkAccept(Input)
    teS = time.time()

    if (SIZEOF):
        print(asizeof.asized(RC, detail=1).format())

    # Maximal Prefix Parallel Composition Test
    RCS, RCT, RCU = EM13("pDFA"), EM14("pDFA"), EM15("pDFA")
    RC = maximal_prefix_parallel_enforcer(RCS, RCT, RCU)
    tsM = time.time()
    accept = RC.checkAccept(Input)
    teM = time.time()

    if (SIZEOF):
        print(asizeof.asized(RC, detail=1).format())

    # Parallel Composition Test
    RCS, RCT, RCU = EM13("pDFA"), EM14("pDFA"), EM15("pDFA")
    RC = parallel_enforcer(RCS, RCT, RCU)
    tsC = time.time()
    accept = RC.checkAccept(Input)
    teC = time.time()

    if (SIZEOF):
        print(asizeof.asized(RC, detail=1).format())

    return (teP - tsP) * 1000, (teS - tsS) * 1000, (teM - tsM) * 1000, (
        teC - tsC) * 1000
示例#15
0
 def test_asized(self):
     '''Test asizeof.asized()
     '''
     # No positional objects -> empty result.
     self.assertEqual(list(asizeof.asized(detail=2)), [])
     # Unknown keyword options raise KeyError.
     self.assertRaises(KeyError, asizeof.asized, **{'all': True})
     sized = asizeof.asized(Foo(42), detail=2)
     # FIX: self.assert_ is a deprecated unittest alias (removed in
     # Python 3.12); assertTrue is the canonical spelling.
     self.assertTrue("Foo" in sized.name, sized.name)
     refs = [ref for ref in sized.refs if ref.name == '__dict__']
     self.assertEqual(len(refs), 1)
     refs = [ref for ref in refs[0].refs if ref.name == '[V] data: 42']
     self.assertEqual(len(refs), 1, refs)
     i = 42
     self.assertEqual(refs[0].size, asizeof.asizeof(i), refs[0].size)
     # Size multiple objects
     sizer = asizeof.Asizer()
     sized_objs = sizer.asized(Foo(3), Foo(4), detail=2)
     self.assertEqual(len(sized_objs), 2)
示例#16
0
 def test_namedtuple(self):
     '''Test namedtuple __dict__ property isn't included
     '''
     from collections import namedtuple
     # Size a small namedtuple instance one level deep; its __dict__
     # property must not show up among the collected references.
     PointType = namedtuple('Point', ['x', 'y'])
     sized = asizeof.asized(PointType(x=11, y=22), detail=1)
     ref_names = [ref.name for ref in sized.refs]
     self.assertTrue('__dict__' not in ref_names, ref_names)
示例#17
0
 def test_namedtuple(self):
     '''Test namedtuple __dict__ property isn't included
     '''
     from collections import namedtuple
     Point = namedtuple('Point', ['x', 'y'])
     point = Point(x=11, y=22)
     # detail=1 collects the direct references of `point`; the namedtuple's
     # __dict__ property must not appear among them.
     size = asizeof.asized(point, detail=1)
     refs = [ref.name for ref in size.refs]
     self.assertTrue('__dict__' not in refs, refs)
示例#18
0
File: test.py  Project: Octoberr/sspywork
def pymp():
    """Every two seconds, print the deep and flat sizes (in MB) of the
    module-level `lst` and `dct`; loops forever, logging any failure."""
    # NOTE(review): the tracker is created but only used in the
    # commented-out diagnostics below -- confirm whether it is still needed.
    tr = tracker.SummaryTracker()
    while True:
        try:
            # print(asizeof.asizesof(lst, dct))
            # print(asizeof.asized(lst, dct, detail=1))
            # tr.print_diff()
            # Flag presumably read by another thread/consumer -- confirm.
            prtd.printed = False
            sz_lst: asizeof.Asized = asizeof.asized(lst, detail=0)
            sz_dct: asizeof.Asized = asizeof.asized(dct, detail=0)
            print(f'''
lst-> size: {round(sz_lst.size/1048576,2)} MB   flat: {round(sz_lst.flat/1048576,2)} MB
dct-> size: {round(sz_dct.size/1048576,2)} MB   flat: {round(sz_dct.flat/1048576,2)} MB
            ''')
            prtd.printed = True
        except Exception as ex:
            # Keep looping even if sizing fails; record the traceback.
            logger.error(traceback.format_exc())
        finally:
            time.sleep(2)
示例#19
0
 def _print_functions(self, obj, name=None, align=8, detail=MAX, code=False, limit=MAX,
                           opt='', **unused):
     # Print the result of each public asizeof helper for `obj`;
     # **unused absorbs extra keyword options passed through by callers.
     if name:
         self._printf('%sasizeof functions for %s ... %s', os.linesep, name, opt)
     self._printf('%s(): %s', ' basicsize', asizeof.basicsize(obj))
     self._printf('%s(): %s', ' itemsize',  asizeof.itemsize(obj))
     self._printf('%s(): %r', ' leng',      asizeof.leng(obj))
     self._printf('%s(): %s', ' refs',     _repr(asizeof.refs(obj)))
     self._printf('%s(): %s', ' flatsize',  asizeof.flatsize(obj, align=align))  # , code=code
     self._printf('%s(): %s', ' asized',           asizeof.asized(obj, align=align, detail=detail, code=code, limit=limit))
示例#20
0
    def test_asized(self):
        '''Test asizeof.asized()
        '''
        # No positional objects -> empty result tuple.
        self.assertEqual(list(asizeof.asized(detail=2)), [])
        # Unknown keyword options raise KeyError.
        self.assertRaises(KeyError, asizeof.asized, **{'all': True})
        sized = asizeof.asized(Foo(42), detail=2)
        self.assertEqual(sized.name, 'Foo')
        dict_refs = [r for r in sized.refs if r.name == '__dict__']
        self.assertEqual(len(dict_refs), 1)
        # get() must return the same ref object found by name.
        self.assertEqual(dict_refs[0], sized.get('__dict__'))

        data_refs = [r for r in dict_refs[0].refs if r.name == '[V] data: 42']
        self.assertEqual(len(data_refs), 1, data_refs)
        i = 42
        # The data ref sizes exactly like the int 42 itself.
        self.assertEqual(data_refs[0].size, asizeof.asizeof(i),
                         data_refs[0].size)
        # Size multiple objects
        sizer = asizeof.Asizer()
        sized_objs = sizer.asized(Foo(3), Foo(4), detail=2)
        self.assertEqual(len(sized_objs), 2)
    def _record_memory(self, message, extra=None):
        """Deep-size this object with pympler and log it with `message`.

        No-op when `self.enable_memory_log` is false.  The size figures are
        merged into `extra` (mutated in place when a dict is provided) so
        they reach the structured log record.
        """
        if not self.enable_memory_log:
            return None

        if extra is None:
            extra = {}

        sized = asizeof.asized(self)
        measurements = {'mem_size': sized.size, 'mem_flat': sized.flat}
        extra.update(measurements)
        self.log.info('%s size=%s flat=%s %s' %
                      (self, sized.size, sized.flat, message),
                      extra=extra)
示例#22
0
def main(args):
    """Build MNIST train/test DataLoaders (batch size 1) and print, for
    every batch, sys.getsizeof alongside pympler's deep-size report."""

    ds_tr = MNIST(args.ds_root, set_=True, lmdb_=args.lmdb)
    ds_te = MNIST(args.ds_root, set_=False, lmdb_=args.lmdb)

    # Shared loader settings; only shuffle differs between train and test.
    dl_kargs = {
        'batch_size': 1,
        'num_workers': 0,
        'pin_memory': True,
        'drop_last': False,
    }
    dl_tr = DataLoader(ds_tr, shuffle=True, **dl_kargs)
    dl_te = DataLoader(ds_te, shuffle=False, **dl_kargs)

    # NOTE(review): `cur_data` is the whole (index, batch) tuple produced by
    # enumerate, so the index int is included in both size figures --
    # confirm whether `for i, cur_data in enumerate(...)` was intended.
    for cur_data in enumerate(dl_tr):
        data_size = sys.getsizeof(cur_data)
        data_info = asizeof.asized(cur_data, detail=1).format()
        print(data_size, data_info)
    for cur_data in enumerate(dl_te):
        data_size = sys.getsizeof(cur_data)
        data_info = asizeof.asized(cur_data, detail=1).format()
        print(data_size, data_info)
示例#23
0
def _compute_sizes(obj, min_size=10000):
    """Return the sizes of the attributes of an object."""
    sized = asized(obj, detail=2)
    dict_ref = next((r for r in sized.refs if r.name == '__dict__'), None)
    if dict_ref is None:
        raise Exception('Cannot find __dict__ reference')

    # Ref names look like '[V] attr: value'; strip the '[V] ' prefix and
    # the ': value' suffix, keeping only attributes above the threshold.
    return {
        attr.name.split(':')[0][4:]: attr.size
        for attr in dict_ref.refs if attr.size > min_size
    }
示例#24
0
def print_memory_profiles(sm, tr, tr_sm, LOGFILE = None):
    '''Prints report on memory profiles.

    sm is the SeriesModel, tr a pympler SummaryTracker for the whole run,
    tr_sm a ClassTracker for SeriesModel; LOGFILE, when given, is an open
    file that also receives the output.
    '''
    from contextlib import redirect_stdout  # stdlib; tees tracker output

    ptf( '\nSERIESMODEL profiling', LOGFILE)
    ptf( 'Look at size of seriesmodel object', LOGFILE)
    ptf( asizeof.asizeof(sm), LOGFILE)
    ptf( asizeof.asized(sm, detail=1).format(), LOGFILE)

    ptf( 'Look at how the SeriesModel class is doing', LOGFILE)
    tr_sm.create_snapshot()
    tr_sm.stats.print_summary()
    # BUG FIX: `print_summary() >> LOGFILE` was a Python 2 print-chevron
    # idiom misapplied to a method call -- print_summary() returns None,
    # so `None >> LOGFILE` raised TypeError.  Redirect stdout instead.
    if LOGFILE:
        with redirect_stdout(LOGFILE):
            tr_sm.stats.print_summary()

    ptf( 'PROFILING', LOGFILE)
    ptf( 'Look at memory leaks up to this point', LOGFILE)
    # Same fix for `tr.print_diff() >> LOGFILE`.
    if LOGFILE:
        with redirect_stdout(LOGFILE):
            tr.print_diff()
    tr.print_diff()
def main_process():
    """Memory-inspection demo: deep-size a small mixed list with pympler,
    snapshot allocations with SummaryTracker, then compare sys.getsizeof."""
    # User-facing (Chinese) output text, left byte-identical; it explains
    # that Python does not release all memory on interpreter exit.
    t = '''
    q: 当退出Python时,是否释放全部内存?
    ans:答案是No。循环引用其它对象或引用自全局命名空间的对象的模块,在Python退出时并非完全释放。
    另外,也不会释放C库保留的内存部分
    '''
    print(colored('mycount=', 'red'), t)


    # Deep size (bytes) and a one-level breakdown of a small mixed list.
    obj = [1, 2, (3, 4), 'text']
    print(asizeof.asizeof(obj))
    print(asizeof.asized(obj, detail=1).format())

    # Allocate ~2000x2000 floats and show the allocation diff.
    tr = tracker.SummaryTracker()
    a = [[random.random() for i in range(2000)] for i in range(2000)]
    tr.print_diff()

    # For contrast: getsizeof is shallow (outer list only).
    gc.collect()
    from sys import getsizeof
    print('-'*20, getsizeof(a))
def test2(Input):
    """Time A_B_C.checkAccept(Input) for the monolithic and parallel
    compositions of EM1/EM2/EM3; return both runtimes in milliseconds.

    Timers start only after construction.  FIX: the original assigned
    `tsP`/`tsC` twice -- the first store before construction was dead and
    has been removed.
    """
    # Monolithic Test
    A, B, C = EM1(), EM2(), EM3()
    A_B_C = monolithic_enforcer('A_B_C', A, B, C)
    tsP = time.time()
    accept = A_B_C.checkAccept(Input)
    teP = time.time()

    if (SIZEOF):
        print(asizeof.asized(A_B_C, detail=1).format())

    # Parallel Composition Test
    A, B, C = EM1("pDFA"), EM2("pDFA"), EM3("pDFA")
    A_B_C = parallel_enforcer(A, B, C)
    tsC = time.time()
    accept = A_B_C.checkAccept(Input)
    teC = time.time()

    return (teP - tsP) * 1000, (teC - tsC) * 1000
def test3(Input):
    """Time R.checkAccept(Input) for the monolithic and parallel
    compositions of EM4..EM9; return both runtimes in milliseconds.

    Timers start only after construction.  FIX: the original assigned
    `tsP`/`tsC` twice -- the first store before construction was dead and
    has been removed.
    """
    # Monolithic Test
    R1, R2, R3, R4, R5, R6 = EM4(), EM5(), EM6(), EM7(), EM8(), EM9()
    R = monolithic_enforcer('R', R1, R2, R3, R4, R5, R6)
    tsP = time.time()
    accept = R.checkAccept(Input)
    teP = time.time()

    if (SIZEOF):
        print(asizeof.asized(R, detail=1).format())

    # Parallel Composition Test
    R1, R2, R3, R4, R5, R6 = EM4("pDFA"), EM5("pDFA"), EM6("pDFA"), EM7(
        "pDFA"), EM8("pDFA"), EM9("pDFA")
    R = parallel_enforcer(R1, R2, R3, R4, R5, R6)
    tsC = time.time()
    accept = R.checkAccept(Input)
    teC = time.time()

    return (teP - tsP) * 1000, (teC - tsC) * 1000
示例#28
0
def sub_string(seq1, seq2):
    """ Return sub-strings from seq2 which are part of strings in seq1
    - Optimized version
    """

    # E.g: seq1 = ['introduction','discipline','animation']
    # seq2 = ['in','on','is','mat','ton']
    # Result = ['in','on','mat','is']

    # Build a lookup of every slice of each seq1 string whose length falls
    # within the range of lengths present in seq2; dict keys give O(1)
    # membership tests.
    min_l, max_l = min(map(len, seq2)), max(map(len, seq2))
    sequences = {}

    for i in range(min_l, max_l+1):
        for string in seq1:
            # IDIOM: dict.fromkeys instead of `{}.fromkeys`; values are
            # None -- only key membership matters.
            sequences.update(dict.fromkeys(slices(string, i)))

    # IDIOM: comprehension instead of append loop; order and duplicates of
    # seq2 are preserved.
    subs = [item for item in seq2 if item in sequences]
    print('Memory usage',asizeof.asized(sequences).format())

    return subs
示例#29
0
File: test.py  Project: albertotb/pydlm
# Compare the optimized dlm ('odlm') against the reference dlm: identical
# trend component, identical data, one-step predictions must agree to
# 7 decimals at every step.
model1 = odlm([]) + trend(degree=2, discount=0.95, name='trend1')
model1.stableMode(False)

model2 = dlm([]) + trend(degree=2, discount=0.95, name='trend1')
model2.stableMode(False)

# NOTE(review): d1/d2 are never written below -- confirm they are needed.
d1 = {}
d2 = {}
for idx, el in enumerate(ts):
    # Feed the next observation to both models and run the forward filter.
    model1.append([el], component='main')
    model1.fitForwardFilter()

    model2.append([el], component='main')
    model2.fitForwardFilter()

    # Deep-size both models (results unused here; presumably kept for
    # interactive inspection -- confirm).
    a1 = asizeof.asized(model1, detail=4)
    a2 = asizeof.asized(model2, detail=4)

    mean1, var1 = model1.predictN(N=1, date=model1.n - 1)
    mean2, var2 = model2.predictN(N=1, date=model2.n - 1)

    np.testing.assert_almost_equal(mean1,
                                   mean2,
                                   decimal=7,
                                   err_msg='',
                                   verbose=True)
    np.testing.assert_almost_equal(var1,
                                   var2,
                                   decimal=7,
                                   err_msg='',
                                   verbose=True)
示例#30
0
    def handle(self, handler):
        '''Dispatch a request by its first URI segment
        (tree/type/content/metainfo/data/debug_mem) and write the response
        with the matching content type; unknown routes get 'Error 404'.
        '''
        content = None

        splitted_request = handler.request.uri.split('/')

        # Everything after the route segment is the resource path.
        path = '/'.join(splitted_request[2:])

        if splitted_request[1] == 'tree':

            data_path = path.split('?')[0]
            parameters = path.split('?')[1].split('&')

            # NOTE(review): urllib.unquote is Python 2 API; Python 3 needs
            # urllib.parse.unquote -- confirm the runtime.
            if parameters[0][0] != '_':
                data_path = urllib.unquote(parameters[0].split('=')[1])
            else:
                data_path = None

            content = json.dumps(self._manager.get_tree(data_path))
            content_type = 'text/html'

        elif splitted_request[1] == 'type':

            content = self._manager.check_path_type(path)
            if not content:
                content = 'NULL'
            content_type = 'text/html'

        elif splitted_request[1] == 'content':

            content = json.dumps(self._manager.get_content(path))
            content_type = 'text/html'

        elif splitted_request[1] == 'metainfo':

            content = self._manager.get_meta_info(path)
            content_type = 'text/html'

        elif splitted_request[1] == 'data':

            # this is for actual image data
            path = '/'.join(splitted_request[2:-1])

            # Last segment encodes the tile as 'w-x-y-z'.
            tile = splitted_request[-1].split('-')

            x = int(tile[1])
            y = int(tile[2])
            z = int(tile[3])
            w = int(tile[0])

            content = self._manager.get_image(path, x, y, z, w)
            content_type = 'image/jpeg'

        elif splitted_request[1] == 'debug_mem':

            # Debug endpoint: deep-size this handler (pympler) and list
            # the cached tile keys.
            from pympler import asizeof
            content = asizeof.asized(self,
                                     detail=7).format().replace('\n', '<br/>')
            content += ' '.join([
                '{}: {}<br/>'.format(ii, kk)
                for ii, kk in enumerate(self._manager._tiles.keys())
            ])
            content_type = 'text/html'

        # invalid request
        if not content:
            content = 'Error 404'
            content_type = 'text/html'

        # handler.set_header('Cache-Control',
        #                    'no-cache, no-store, must-revalidate')
        # handler.set_header('Pragma','no-cache')
        # handler.set_header('Expires','0')
        handler.set_header('Access-Control-Allow-Origin', '*')
        handler.set_header('Content-Type', content_type)
        handler.write(content)
示例#31
0
        or (hopefully) any other deap mutli obj ftn handles this Individual class
        http://deap.readthedocs.io/en/master/api/base.html#fitness
        '''

        def __init__(self):
            # Start with an empty fitness tuple; values are presumably
            # filled in by an evaluation step elsewhere -- confirm.
            self.values = () #empty tuple

        # check dominates
        def dominates(self, other):
            """Return True if this fitness Pareto-dominates `other`.

            NOTE(review): the comments below describe maximization
            (a >= b / a > b) but the code compares with `<` and `<=`,
            i.e. smaller-is-better.  Confirm the objectives are minimized;
            otherwise the inequalities are inverted.
            """
            a = np.array(self.values)
            b = np.array(other.values)
            # 'self' must be at least as good as 'other' for all objective fnts (np.all(a>=b))
            # and strictly better in at least one (np.any(a>b))
            return np.any(a < b) and np.all(a <= b)


if __name__ == "__main__":
    '''
    quickly build an individual to check its footprint
    '''
    # Build a minimal individual from a single block definition.
    block_def = BlockDefinition(...)
    individual_def = IndividualDefinition([block_def])
    individual = IndividualMaterial(individual_def)

    # Shallow size first, then pympler's deep size when available.
    print("with sys...\n", sys.getsizeof(individual))
    try:
        from pympler import asizeof
        print("with pympler...\n", asizeof.asizeof(individual))
        # BUG FIX: was `asizeof.asized(ting, ...)` -- `ting` is undefined
        # (NameError); the object being sized is `individual`.
        print(asizeof.asized(individual, detail=1).format())
    except ModuleNotFoundError:
        print("module pympler not installed...skipping")
示例#32
0
 def test_asized_detail(self):
     '''A deeper detail level changes only the ref breakdown, never the
     reported total size.'''
     foo = Foo(42)
     shallow = asizeof.asized(foo, detail=1)
     deep = asizeof.asized(foo, detail=2)
     self.assertEqual(shallow.size, deep.size)
示例#33
0
	def print_object_profile(self,obj):
		"""Print a one-level pympler size breakdown of obj."""
		report = asizeof.asized(obj, detail=1).format()
		print(report)
示例#34
0
async def log_data_store(scheduler, state):
    """Count the number of objects and the data store size."""
    # Record the sample timestamp, then for each (key, value) pair in the
    # scheduler's data store append the element count and the pympler deep
    # size to the per-key series kept in `state`.
    state['times'].append(time())
    for key, value in _iter_data_store(scheduler.data_store_mgr.data):
        state['objects'][key].append(len(value))
        state['size'][key].append(asized(value).size)
示例#35
0
 def test_asized_detail(self):
     # The detail level controls how deep references are collected; the
     # reported total size must be identical either way.
     foo = Foo(42)
     size1 = asizeof.asized(foo, detail=1)
     size2 = asizeof.asized(foo, detail=2)
     self.assertEqual(size1.size, size2.size)
示例#36
0
File: webserver.py  Project: Rhoana/mb
    def handle(self, handler):
        '''Dispatch a request by its first URI segment
        (tree/type/content/metainfo/data/debug_mem) and write the response
        with the matching content type; unknown routes get 'Error 404'.
        '''
        content = None

        splitted_request = handler.request.uri.split('/')

        # Everything after the route segment is the resource path.
        path = '/'.join(splitted_request[2:])

        if splitted_request[1] == 'tree':

            data_path = path.split('?')[0]
            parameters = path.split('?')[1].split('&')

            # NOTE(review): urllib.unquote is Python 2 API; Python 3 needs
            # urllib.parse.unquote -- confirm the runtime.
            if parameters[0][0] != '_':
                data_path = urllib.unquote(parameters[0].split('=')[1])
            else:
                data_path = None

            content = json.dumps(self._manager.get_tree(data_path))
            content_type = 'text/html'

        elif splitted_request[1] == 'type':

            content = self._manager.check_path_type(path)
            if not content:
                content = 'NULL'
            content_type = 'text/html'

        elif splitted_request[1] == 'content':

            content = json.dumps(self._manager.get_content(path))
            content_type = 'text/html'

        elif splitted_request[1] == 'metainfo':

            content = self._manager.get_meta_info(path)
            content_type = 'text/html'

        elif splitted_request[1] == 'data':

            # this is for actual image data
            path = '/'.join(splitted_request[2:-1])

            # Last segment encodes the tile as 'w-x-y-z'.
            tile = splitted_request[-1].split('-')

            x = int(tile[1])
            y = int(tile[2])
            z = int(tile[3])
            w = int(tile[0])

            content = self._manager.get_image(path, x, y, z, w)
            content_type = 'image/jpeg'

        elif splitted_request[1] == 'debug_mem':

            # Debug endpoint: deep-size this handler (pympler) and list
            # the cached tile keys.
            from pympler import asizeof
            content = asizeof.asized(self, detail=7).format().replace('\n','<br/>')
            content += ' '.join(['{}: {}<br/>'.format(ii, kk) for ii, kk in enumerate(self._manager._tiles.keys())])
            content_type = 'text/html'

        # invalid request
        if not content:
            content = 'Error 404'
            content_type = 'text/html'

        # handler.set_header('Cache-Control',
        #                    'no-cache, no-store, must-revalidate')
        # handler.set_header('Pragma','no-cache')
        # handler.set_header('Expires','0')
        handler.set_header('Access-Control-Allow-Origin', '*')
        handler.set_header('Content-Type', content_type)
        handler.write(content)
示例#37
0
def _first_two_replicas(primary):
    """Return the first two replica objects of *primary* as a list.

    ``dict.values()`` is not subscriptable in Python 3, so the view is
    materialized once instead of indexing ``values()[i]`` directly.
    """
    return list(primary.replicas._replicas.values())[:2]


def _request_missing_commits(primary):
    """Ask the first two replicas to re-request every not-yet-ordered commit."""
    replica0, replica1 = _first_two_replicas(primary)
    # Instance 0 is the master replica; its counters come from master_replica,
    # mirroring the original request loop.
    for seq_no in range(primary.master_replica.last_ordered_3pc[1],
                        primary.master_replica.lastPrePrepareSeqNo):
        replica0._request_commit((0, seq_no))
    for seq_no in range(replica1.last_ordered_3pc[1],
                        replica1.lastPrePrepareSeqNo):
        replica1._request_commit((0, seq_no))


def _gc_replicas(primary):
    """Run the 3PC garbage collector on the first two replicas' queues."""
    for replica in _first_two_replicas(primary):
        replica._gc(replica.last_ordered_3pc)


def _dump_memory_report(memory_dicts, file_name, indent=75, top_n=11):
    """Write one line per snapshot: total size plus the heaviest attributes.

    :param memory_dicts: mapping of phase label -> ``asizeof.Asized`` result
    :param file_name: path of the report file (overwritten)
    :param indent: column at which the per-attribute data starts
    :param top_n: how many of the heaviest attributes to list per phase
    """
    # 'with' guarantees the report is closed even if a snapshot is malformed.
    with open(file_name, 'w') as report:
        for label, size_obj in memory_dicts.items():
            header = str(label) + ': {}'.format(
                size_obj.size) + ' bytes. Detailed size:'
            # Pad the header so the per-attribute columns line up.
            report.write(header.ljust(indent))

            instance_dict = next(r for r in size_obj.refs
                                 if r.name == '__dict__')
            # Sort in descending order to select most 'heavy' collections
            heaviest = sorted(instance_dict.refs,
                              key=lambda v: v.size, reverse=True)
            for sub_obj in heaviest[:top_n]:
                report.write('[{} : {}],      '.format(sub_obj.name,
                                                       sub_obj.size))
            report.write('\n')


def test_memory_debugging(looper, nodeSet, sdk_wallet_endorser,
                          sdk_pool_handle, logger):
    """Profile the primary node's memory across order/stall/GC cycles.

    Orders a batch of NYM requests, then emulates commit-delivery problems
    so unordered 3PC state accumulates, re-requests the missing commits,
    garbage-collects the replica queues, and repeats the whole cycle.
    After each phase an ``asizeof`` snapshot of the primary is recorded;
    all snapshots are finally dumped to ``file_name``.
    """
    # Settings
    requests_count = 500
    file_name = '.memory_data.txt'

    # Node sets for emulating commit problems: stalled_sets[i] contains every
    # node except nodeSet[i], so nodeSet[i] stops receiving commits from them.
    stalled_sets = [[node for node in nodeSet if node is not target]
                    for target in nodeSet[:3]]
    primary = nodeSet[0]

    memory_dicts = OrderedDict()

    memory_dicts['After starting'] = asizeof.asized(primary, detail=15)

    # Order the first batch of requests normally.
    while primary.master_replica.lastPrePrepareSeqNo < requests_count:
        sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_endorser)

    memory_dicts['After ordering'] = asizeof.asized(primary, detail=15)

    # Emulate commit sending problems
    for stalled, target in zip(stalled_sets, nodeSet[:3]):
        dont_send_commit_to(stalled, target.name)

    # Sending requests until nodes generate `requests_count` more 3pc batches
    while primary.master_replica.lastPrePrepareSeqNo < requests_count * 2:
        sdk_add_new_nym_without_waiting(looper, sdk_pool_handle,
                                        sdk_wallet_endorser)

    memory_dicts['After {} unordered'.format(requests_count)] = asizeof.asized(
        primary, detail=15)

    # Remove commit problems and let the primary ask for the missing commits.
    for stalled in stalled_sets:
        reset_sending(stalled)
    _request_missing_commits(primary)
    looper.runFor(5)

    memory_dicts['After {} ordered'.format(requests_count)] = asizeof.asized(
        primary, detail=15)

    # primary clears the replica queues
    _gc_replicas(primary)

    memory_dicts['After _gc called'] = asizeof.asized(primary, detail=15)

    # Emulate problems again
    for stalled, target in zip(stalled_sets, nodeSet[:3]):
        dont_send_commit_to(stalled, target.name)

    while primary.master_replica.lastPrePrepareSeqNo < requests_count * 3:
        sdk_add_new_nym_without_waiting(looper, sdk_pool_handle,
                                        sdk_wallet_endorser)

    memory_dicts['After {} unordered again'.format(
        requests_count)] = asizeof.asized(primary, detail=15)

    # Remove commit problems
    for stalled in stalled_sets:
        reset_sending(stalled)
    _request_missing_commits(primary)
    looper.runFor(5)

    memory_dicts['After {} ordered again'.format(
        requests_count)] = asizeof.asized(primary, detail=15)

    _gc_replicas(primary)

    memory_dicts['After _gc called again'] = asizeof.asized(primary, detail=15)

    _dump_memory_report(memory_dicts, file_name)
示例#38
0
    def __init__(self, title):
        """Remember the display *title* of this subject."""
        self.title = title


tr = tracker.SummaryTracker()

# One shared Subject instance per course.
ENGLISH = Subject('English')
ANIMATION = Subject('Animation')
COMPUTING = Subject('Computing')

# All English students are the same age, so build that class in one pass.
class1 = [Student(name, 20, ENGLISH)
          for name in ('Jasmine', 'Adam', 'Eloise')]

class2 = [Student('Phoebe', 22, ANIMATION), Student('Gryff', 22, ANIMATION)]

class3 = [
    Student('Tom', 24, COMPUTING),
    Student('James', 21, COMPUTING),
    Student('Andrew', 23, COMPUTING),
]

# Flatten the three class lists into a single cohort.
year = class1 + class2 + class3

print('asizeof.asizeof([year])', asizeof.asizeof([year]))
print(asizeof.asized([year], detail=3).format())

tr.print_diff()
示例#39
0
def _get_object_size(object_):
    """Print the shallow (sys.getsizeof) and deep (pympler) size of *object_*."""
    shallow_size = sys.getsizeof(object_)
    deep_report = asizeof.asized(object_, detail=1).format()
    print(f' object_size: {shallow_size}, \n object_info: {deep_report} \n')
示例#40
0
"""
Examples of how to use Pympler: https://pythonhosted.org/Pympler/
to analyze the memory usage of Python code.
"""
import sys
from pympler import asizeof
from pympler import classtracker

# Calculate memory usage for a list, three ways: shallow stdlib size,
# pympler's deep size, and pympler's detailed breakdown.
list_obj = [1, 2, 3, 'abc']
for report in (
        sys.getsizeof(list_obj),
        asizeof.asizeof(list_obj),
        asizeof.asized(list_obj, detail=1).format(),
):
    print(report)


# track memory usage for a class
class C:
    """Tiny sample class whose only state is a list built from *iterable*."""

    def __init__(self, iterable):
        # Materialize the iterable so the instance owns an independent list.
        self.data = list(iterable)


tr = classtracker.ClassTracker()
tr.track_class(C)
tr.create_snapshot()
# Instantiate two tracked objects between the snapshots so the summary
# attributes their memory to class C.
c1 = C(range(1, 4))
c2 = C(range(1, 6))
tr.create_snapshot()
tr.stats.print_summary()

# track memory usage for an instance
tracker = classtracker.ClassTracker()