Example #1
0
def solution(problem_id):
    """Render the solution page for one problem.

    Recomputes the solution only when:
    1) the module is loaded for the first time,
    2) the module source file was modified since the module was last
       loaded, or
    3) there is no cached result and/or profile_stats (when profiling
       is requested via the ``?profile=true`` query parameter).
    """
    try:
        # Load the solution module via ModuleManager; add_module reports
        # whether a (re)load happened, which forces a recomputation below.
        module_name = 'solutions.problem{0}'.format(problem_id)
        do_calculation = module_manager.add_module(module_name)
        module = module_manager.get_module(module_name)
    except ImportError:
        # Unknown problem id: render the page without content or result.
        return render_template('solution.html',
            problem_id=problem_id)

    # The module docstring holds the problem statement text.
    problem_content = module.__doc__

    try:
        do_profile = request.args.get('profile', '')

        # Look for a previously cached result / profile stats.
        result_key = 'problem_{0}_result'.format(problem_id)
        result = cache.get(result_key)
        profile_stats_key = 'problem_{0}_profile_stats'.format(problem_id)
        profile_stats = cache.get(profile_stats_key)
        # `is None` (not `== None`) per PEP 8; None means a cache miss.
        if result is None:
            do_calculation = True
        if do_profile == 'true' and profile_stats is None:
            do_calculation = True

        # Recompute (optionally under the profiler) when required.
        if do_calculation:
            if do_profile == 'true':
                p = Profiler(module.solution)
                result = p()
                profile_stats = p.get_stats()
            else:
                result = module.solution()
                profile_stats = None
            # Persist the fresh values for the next request.
            cache.set(result_key, result)
            cache.set(profile_stats_key, profile_stats)

        return render_template('solution.html',
            problem_id=problem_id,
            problem_content=problem_content,
            result=result,
            profile_stats=profile_stats)

    except KeyError:
        # Missing template context / malformed request data.
        return render_template('error.html'), 404
# Application-wide logger; get_logger is a project helper (presumably a
# wrapper around logging.getLogger -- TODO confirm). DEBUG level so every
# message is emitted during development.
logger = get_logger('learn2test')
logger.setLevel(logging.DEBUG)


if __name__ == '__main__':
    # CLI flags for evaluation; ConfigArgumentParser presumably merges a
    # config file with these command-line arguments -- TODO confirm.
    parser = ConfigArgumentParser(conflict_handler='resolve')
    parser.add_argument('--test-batch', type=int, default=32)
    parser.add_argument('--tta', type=str, default='center')
    parser.add_argument('--deform', type=str, default='')
    parser.add_argument('--corrupt', type=str, default='')
    args = parser.parse_args()

    # NOTE(review): args.dataset / target_network / num_classes / target_aug
    # are not declared above; they presumably come from the config side of
    # ConfigArgumentParser -- verify.
    assert args.dataset == 'imagenet'

    # Build the (eval-mode) target network and report its FLOPs for a
    # single target_size x target_size RGB input.
    model_target = get_model(args.target_network, gpus=[0], num_classes=args.num_classes, train_aug=args.target_aug).eval()
    profiler = Profiler(model_target)
    print('target network, FLOPs=', profiler.flops(torch.zeros((1, 3, C.get()['target_size'], C.get()['target_size'])).cuda(), ))

    # Common ImageNet eval convention: pre-crop resize size such that a
    # 0.875 center crop yields the target size.
    scaled_size = int(math.floor(args.target_size / 0.875))

    # Optional input deformation, specified as "<type> <level>"
    # (e.g. "rotate 30", "bright 1.5", "zoom 0.9").
    if args.deform != '':
        deform_type, deform_level = args.deform.split(' ')
        if deform_type in ['rotate', 'rotation']:
            t = torchvision.transforms.Lambda(lambda img_orig: torchvision.transforms.functional.rotate(img_orig, int(deform_level), resample=PIL.Image.BICUBIC))
        elif deform_type == 'bright':
            t = torchvision.transforms.Lambda(lambda img_orig: torchvision.transforms.functional.adjust_brightness(img_orig, float(deform_level)))
        elif deform_type == 'zoom':
            # Level is a scale factor applied to the pre-crop resize size.
            resize = int(scaled_size * float(deform_level))
            t = torchvision.transforms.Lambda(lambda img_orig: torchvision.transforms.functional.resize(img_orig, resize, interpolation=PIL.Image.BICUBIC))
        elif deform_type:
            raise ValueError('Invalid Deformation=%s' % deform_type)
        # NOTE(review): if deform_type is falsy after split, `t` stays
        # undefined; downstream use of `t` is not visible here -- confirm
        # (this section may be truncated).
Example #3
0
 def enable_profiling(self):
     self.do_profiling =True
     self.profiler = Profiler()
     return
Example #4
0
class Environment:
    """Manage the side-effects of an interpreter.

    Holds the call stack, user/builtin function lookup maps, per-call
    local-variable scopes, argument and return-value stacks, the
    break/return/continue control flags and optional profiling state.
    """

    def __init__(self, call_stack, function_map, builtin_map, dbg=False, MAX_REC_DEPTH=128):
        self.do_profiling = False
        self.profiler = None
        # Keep this smaller than the Python interpreter stack so we can
        # report an Ezhil-level overflow before Python itself blows up.
        self.max_recursion_depth = MAX_REC_DEPTH
        self.call_stack = call_stack        # list of active function names
        self.function_map = function_map    # dict: name -> user function
        self.builtin_map = builtin_map      # dict: name -> builtin function
        self.local_vars = []                # list of dicts, one scope per frame
        self.arg_stack = []                 # list of lists, one per pending call
        self.ret_stack = []                 # list of pending return values
        self.debug = dbg                    # True enables debug tracing
        # Read-only language constants, including the Tamil aliases.
        self.readonly_global_vars = {'True': True, 'False': False, u"மெய்": True, u"பொய்": False}
        self.clear_break_return_continue()

    def __del__(self):
        if (self.debug): print(u"deleting environment")

    def enable_profiling(self):
        """Start profiling: attach a fresh Profiler and raise the flag."""
        self.do_profiling = True
        self.profiler = Profiler()
        return

    def is_profiling(self):
        """Return True while profiling is active."""
        return self.do_profiling

    def disable_profiling(self):
        """Pause profiling; keeps any collected stats on the profiler."""
        self.do_profiling = False

    def report_profiling(self):
        """Stop profiling and print the collected statistics."""
        self.disable_profiling()
        self.profiler.report_stats()

    def reset_profiling(self):
        """Stop profiling and discard the profiler entirely."""
        self.disable_profiling()
        self.profiler = None

    def unroll_stack(self):
        """Tear down all call frames after an error, reporting the chain."""
        # we skip BOS since it is a __toplevel__
        if (self.debug): print("clearing locals")
        if len(self.call_stack) > 0:
            if self.call_stack[-1] != "__toplevel__":
                self.clear_call()

        for i in range(1, len(self.call_stack)):
            print(u"Error in function called from %s"%str(self.call_stack[i]))
        while len(self.call_stack) > 0:
            self.call_stack.pop()
        return

    def get_break_return(self):
        """Return True if break or return was set; for use in loops."""
        return self.Break or self.Return

    def clear_break(self):
        """Reset after a break statement."""
        self.Break = False
        return False

    def clear_continue(self):
        """Reset after a continue statement."""
        self.Continue = False
        return False

    def set_break(self):
        """Execute a break statement."""
        self.Break = True
        return True

    def set_continue(self):
        """Execute a continue statement."""
        self.Continue = True
        return True

    def break_return_continue(self):
        """Return True if any control flag is set; clears Continue eagerly."""
        val = (self.Break or
               self.Return or
               self.Continue)
        # Continue must be cleared right away so only one iteration skips.
        if (self.Continue):
            self.Continue = False
        return val

    def dbg_msg(self, msg):
        """Print *msg* only when debug tracing is enabled."""
        if (self.debug):
            print(msg)
        return

    def __unicode__(self):
        if (self.debug):
            return repr(self)
        return u"<env>"

    def __repr__(self):
        # Adjacent string literals concatenate: each stack entry ends up
        # on its own line.
        retval = u"CallStack =>"+unicode(self.call_stack) + u"\n" \
            u"LocalVars =>"+ unicode(self.local_vars) + u"\n" \
            u"ArgStack =>"+ unicode(self.arg_stack) + u"\n"
        return retval

    def set_retval(self, rval):
        """Push a return value and flag that a return is in progress."""
        self.ret_stack.append(rval)
        self.Return = True
        return

    def get_retval(self):
        """Pop and return the pending return value, or None if absent."""
        rval = None
        if (len(self.ret_stack) >= 1):
            rval = self.ret_stack.pop()
        return rval

    def clear_break_return_continue(self):
        """Reset all three control-flow flags."""
        self.Break = False
        self.Return = False
        self.Continue = False

    def clear_call(self, copyvars=False):
        """Clean up the local/arg stacks and control flags after a call."""
        self.clear_local(copyvars)
        self.clear_args()
        self.clear_break_return_continue()

    def clear_args(self):
        """Pop the top argument frame off the stack."""
        self.arg_stack.pop()
        return

    def get_args(self):
        """Return the current (top-most) argument frame."""
        return self.arg_stack[-1]

    def set_args(self, val):
        """Push a new argument frame onto the global argument stack."""
        self.dbg_msg( "setting args " + unicode( val ) )
        return self.arg_stack.append(val)

    def set_local(self, vars):
        """Push a new local-variable scope and reset control flags."""
        self.local_vars.append(vars)
        self.dbg_msg( "setting locals " + unicode( vars ) )
        self.clear_break_return_continue()
        return

    def clear_local(self, copyvars=False):
        """Pop the current scope; optionally merge it into the caller's."""
        prev_values = self.local_vars.pop()
        if copyvars:
            if len(self.local_vars) < 1:
                self.local_vars.append({})
            self.local_vars[-1].update(prev_values)
        return

    def has_id(self, idee):
        """Check the readonly globals and the current scope for *idee*."""
        if idee in self.readonly_global_vars:
            return True
        if (len(self.local_vars) == 0):
            return False
        variables = self.local_vars[-1]
        return idee in variables

    def set_id(self, idee, val, global_id=False):
        """Bind *idee* to *val* in the current scope (global_id: someday)."""
        if idee in self.readonly_global_vars:
            raise Exception(u"Error: Attempt to reassign constant %s"%idee)
        if (len(self.local_vars) > 0):
            d = self.local_vars[-1]
        else:
            d = dict()
            self.local_vars.append(d)
        d[idee] = val
        self.dbg_msg("set_id: " + unicode(idee) +" = "+unicode(val))
        return

    def get_id(self, idee):
        """Look up *idee*; raise RuntimeException when it is unbound."""
        if idee in self.readonly_global_vars:
            return self.readonly_global_vars[idee]
        if not self.has_id(idee):
            # FIX: the keyword hint used to be computed but never shown;
            # include it in the error message so users actually see it.
            note = ''
            if idee in keyword.kwlist:
                note = ' Did you possibly confuse the Python english keyword %s for Ezhil keyword ?'%idee
            raise RuntimeException("Identifier %s not found"%idee + note)
        variables = self.local_vars[-1]
        val = variables[idee]
        self.dbg_msg("get_id: val = "+unicode(val))
        return val

    def call_function(self, fn):
        """Push *fn* onto the call stack; guard against stack overflow."""
        if (len(self.call_stack) >= self.max_recursion_depth):
            raise RuntimeException( "Maximum recursion depth [ " +
                                    unicode(self.max_recursion_depth) +
                                    " ] exceeded; stack overflow." )
        self.dbg_msg(u"calling function"+unicode(fn))
        if (self.is_profiling()):
            self.profiler.add_function(fn)
        self.call_stack.append(fn)

    def return_function(self, fn):
        """Pop *fn* from the call stack, verifying it is the top frame."""
        va = self.call_stack.pop()
        if (fn != va):
            raise RuntimeException("function %s doesnt match Top-of-Stack"%fn)
        if (self.is_profiling()):
            self.profiler.update_function(va)
        return va

    def has_function(self, fn):
        """Return True if *fn* is a builtin or a user-defined function."""
        if (fn in self.builtin_map):
            return True
        if (fn in self.function_map):
            return True
        return False

    def get_function(self, fn):
        """Return the callable for *fn*, builtins taking precedence."""
        if not self.has_function(fn):
            raise RuntimeException("undefined function: "+fn)

        if (fn in self.builtin_map):
            return self.builtin_map[fn]

        if (fn in self.function_map):
            return self.function_map[fn]

        raise RuntimeException("Environment error on fetching function "+fn)
Example #5
0
    set_aug_ts = set_aug
    set_def_ts = set_def

    set_aug_tr.num_class = set_aug_vl.num_class = set_aug_ts.num_class = set_def_ts.num_class = num_class

    """
    tar_model : target model
    l2t_model : model which predict relative losses from tar_model
    ema_model : EMA-ed l2t_model
    """
    tar_model = get_model(args.target_network, gpus=[0], num_classes=num_class, train_aug=args.target_aug).eval()
    l2t_model = get_model(args.network, gpus=[0], num_classes=tta_num).train()
    ema_model = get_model(args.network, gpus=[0], num_classes=tta_num).train()
    ema = EMA(args.ema_momentum)

    profiler = Profiler(tar_model)
    logger.info(f'target network, FLOPs={profiler.flops(torch.zeros((1, 3, C.get()["target_size"], C.get()["target_size"])).cuda(), )}')

    profiler = Profiler(l2t_model)
    logger.info(f'L2T network, FLOPs={profiler.flops(torch.zeros((32, 3, C.get()["size"], C.get()["size"])).cuda(), ) / 32.}')

    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(l2t_model.parameters(), args.lr, momentum=args.momentum, weight_decay=0.0, nesterov=True)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(l2t_model.parameters(), args.lr, weight_decay=0.0, amsgrad=True)
    elif args.optimizer == 'rmsproptf':
        optimizer = RMSpropTF(l2t_model.parameters(), args.lr, weight_decay=0.0, alpha=0.9, momentum=0.9, eps=0.001)
    else:
        raise ValueError(args.optimizer)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epoch, eta_min=0.)