def __init__(self, opt, model, frame_rate=30):
    """Initialize JDE tracker state from options, a detection model and the
    stream frame rate (used to scale the lost-track buffer)."""
    self.logger = Log(__name__, 'JDETracker').getlog()
    self.opt = opt
    self.model = model
    # NOTE(review): this mutates the shared `opt` object as a side effect —
    # every later reader of opt.device sees the device chosen here.
    if opt.gpus[0] >= 0:
        opt.device = torch.device('cuda')
    else:
        opt.device = torch.device('cpu')
    # self.model = create_model(opt.arch, opt.heads, opt.head_conv)
    # self.model = load_model(self.model, opt.load_model)
    # self.model = self.model.to(opt.device)
    # self.model.eval()
    # Track pools: currently tracked, temporarily lost, permanently removed.
    self.tracked_stracks = []  # type: list[STrack]
    self.lost_stracks = []  # type: list[STrack]
    self.removed_stracks = []  # type: list[STrack]
    self.frame_id = 0
    self.det_thresh = opt.conf_thres
    # Buffer scales with the stream FPS; 30 FPS is the reference rate.
    self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
    self.max_time_lost = self.buffer_size
    self.max_per_image = opt.K  # max detections kept per frame
    # Per-channel normalization constants shaped for HWC image broadcasting.
    self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
    self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)
    self.kalman_filter = KalmanFilter()
def steepest_hill_climbing_redemarrage(problem, methode, args, nb_essais=1, log=None):
    """Random-restart driver: run `methode` from `nb_essais` random starting
    solutions and keep the best (depart, nb_depl, optimum) triple found."""
    if log is None:
        log = Log()
    log.write("START Redémarrage", 0)
    log.write(problem, 1)
    log.write("%s essais" % str(nb_essais), 1)
    log.write("Methode : %s\n" % str(methode), 1)
    starts = problem.get_random_solutions(nb_essais)
    best = []
    for start in starts:
        candidate = methode(problem, start, args, log)
        # First run always wins; afterwards keep the better local optimum.
        if not best or candidate[2].meilleur_que(best[2]):
            best = candidate
            log.write("Nouvel optimum local : %s" % best[2].tostr(), 1)
    log.write("END Redémarrage", 0)
    log.write("Depart : %s" % best[0].tostr(), 0)
    log.write("Optimum local : %s" % best[2].tostr(), 0)
    log.write("Trouvé en %d déplacements" % best[1], 0)
    return best
def setUp(self):
    """Prepare endpoint, signing and logging fixtures for each test."""
    self.uri = 'http://e.mi.com/openapi/'
    self.port = 'campaign/list'
    self.method = 'GET'
    self.signId = 'HyASGBRpjkmwqdmo'
    self.customerId = '112651'
    self.log = Log("TestCreativeList").print_log()
def __init__(self, base_url, method='GET', headers=None, cookies=None):
    """
    :param method: HTTP verb; must be one of the values in METHOD
    :param base_url: request URL, e.g.
        1. "http://e.mi.com/get"
        2. "http://e.mi.com/get?customerId=3306&creativeId=1000789"
    :param headers: request headers, passed as a dict
    :param cookies: request cookies, passed as a dict
    """
    self.url = base_url
    if method in METHOD:
        self.method = method
    else:
        raise UnKnowMethod('不能识别参数%s' % method)
    self.header = headers
    self.cookie = cookies
    # Create the logger.
    self.log = Log('httpclient').print_log()
    # Open a fresh session and seed it with the given headers/cookies.
    self.session = requests.session()
    self.set_session_headers(self.header)
    self.set_session_cookies(self.cookie)
    # Name of the calling function (one frame up the stack).
    self.module_name = traceback.extract_stack()[-2][2]
    # File name of the module the caller lives in.
    self.module_file = os.path.basename(
        str(traceback.extract_stack()[-2][0]))
def __init__(self, config, model):
    """Build the training harness from `config`: device placement, Adam
    (optionally Lookahead-wrapped) optimizer, loss, apex fp16, FGM adversarial
    training, flooding, gradient clipping, early-stopping state and
    TensorBoard/log writers."""
    self.device = torch.device(
        'cuda' if torch.cuda.is_available() else 'cpu')
    self.model = model.to(self.device)
    self.optimizer = Adam(self.model.parameters(), lr=config.learning_rate)
    self.criterion = LOSS_FUNCTIONS[config.loss_type]
    if config.lookahead:
        # Wrap the base optimizer; k/alpha are the common Lookahead defaults.
        self.optimizer = Lookahead(optimizer=self.optimizer, k=5, alpha=0.5)
    if config.fp16:
        self.fp16 = True
        try:
            from apex import amp
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
            )
        self.model, self.optimizer = amp.initialize(
            self.model, self.optimizer, opt_level=config.fp16_opt_level)
    else:
        self.fp16 = False
    if config.adv_type == 'fgm':
        self.fgm = FGM(self.model)
    else:
        self.fgm = None
    # NOTE(review): falsy values (0, 0.0) for flooding / max_gradient_norm
    # are coerced to None here, i.e. treated as "disabled" — confirm intended.
    if config.flooding:
        self.flooding = config.flooding
    else:
        self.flooding = None
    if config.max_gradient_norm:
        self.max_gradient_norm = config.max_gradient_norm
    else:
        self.max_gradient_norm = None
    if config.init_weight:
        self.init_network()
    self.num_epochs = config.num_epochs
    self.start_time = 0
    # Best-so-far tracking for early stopping / checkpoint selection.
    self.best_score = float('-inf')
    self.best_loss = float('inf')
    self.best_epoch = 0
    self.best_step = 0
    self.patience_counter = 0
    self.patience = config.patience
    self.best_model_file = config.model_path
    base_dir = config.log_dir + '/' + config.model + '_' + config.task_name + '/'
    self.train_writer = SummaryWriter(log_dir=base_dir + 'train')
    self.eval_writer = SummaryWriter(log_dir=base_dir + 'eval')
    self.logger = Log(base_dir, config.log_level)
    self.logger.info('Config', str(config))
def etude_max_depl_tabou(problem, range_dep, range_tabou, nb_essais):
    """Grid-evaluate tabu hill-climbing over every (max_depl, k) pair.

    For each pair, averages the objective value of the local optimum reached
    from `nb_essais` random starting solutions, then plots the resulting
    surface (x = max_depl, y = k).
    """
    log = Log()
    solutions = problem.get_random_solutions(nb_essais)
    results = []
    for i in range_dep:
        line = []
        for j in range_tabou:
            vals = [
                steepest_hill_climbing_tabou(problem, sol, (i, j),
                                             log=log)[2].get_val()
                for sol in solutions
            ]
            line.append(sum(vals) / len(vals))
        results.append(line)
    fig = plt.figure()
    # Bug fix: Figure.gca(projection='3d') was deprecated in Matplotlib 3.4
    # and removed in 3.6; add_subplot is the supported way to get 3-D axes.
    axes = fig.add_subplot(projection='3d')
    # Transpose so rows follow range_tabou, matching meshgrid's output shape.
    z_ax = np.transpose(np.asarray(results))
    x_ax, y_ax = np.meshgrid(range_dep, range_tabou)
    axes.plot_surface(x_ax, y_ax, z_ax, linewidth=0, antialiased=False)
    axes.set_xlabel("max_depl")
    axes.set_ylabel("k")
    plt.show()
class MyTestCase(unittest.TestCase):
    """
    The base class for all testcases: shared logger, Chrome WebDriver
    lifecycle (setUp/tearDown) and a small logging helper.
    """
    success = "SUCCESS "
    fail = "FAIL "
    logger = Log()

    def setUp(self):
        self.logger = Log()
        self.logger.info(
            '############################### START ###############################'
        )
        # Raw string: the original mixed \\ and \ separators, which relied on
        # \L and \C not being recognized escape sequences.
        self.driver = webdriver.Chrome(
            r"C:\Users\Administrator\AppData\Local\Google\Chrome\Application\chromedriver.exe"
        )
        # self.driver = webdriver.Chrome(driver_path+'\\'+'chromedriver.exe')
        self.driver.implicitly_wait(30)
        self.driver.maximize_window()

    def tearDown(self):
        time.sleep(5)
        self.driver.quit()
        self.logger.info(
            '############################### End ###############################'
        )

    def my_print(self, msg):
        # Bug fix: the original called the bare name `logger`, which raises
        # NameError unless a module-level logger happens to exist; use the
        # instance logger instead.
        self.logger.info(msg)
def main():
    """Main entry point: apply per-user CLI overrides, validate environment
    and config, build hyper-parameters and launch training."""
    if len(sys.argv) > 1:
        util.USER_ID = sys.argv[1]

    def mk_for_userid():
        # Give every per-user artifact directory a USER_ID suffix.
        util.CACHE_DIR = os.path.join(util.CACHE_DIR, util.USER_ID) + '/'
        util.RES_DIR = os.path.join(util.RES_DIR, util.USER_ID) + '/'
        util.LOG_DIR = os.path.join(util.LOG_DIR, util.USER_ID) + '/'
        util.MODEL_DIR = os.path.join(util.MODEL_DIR, util.USER_ID) + '/'
        util.SUMMARIES_DIR = os.path.join(util.SUMMARIES_DIR,
                                          util.USER_ID) + '/'
        # util.TRAIN_YAML = util.USER_ID + ".yaml"

    mk_for_userid()
    if len(sys.argv) > 2:
        util.INFER = sys.argv[2]
        # flag = True
    util.check_tensorflow_version()
    util.check_and_mkdir()
    config = load_yaml()
    check_config(config)
    # Bug fix: create_hparams(config) was called twice back to back;
    # the second call simply overwrote the first, so one call suffices.
    hparams = create_hparams(config)
    log = Log(hparams)
    hparams.logger = log.logger
    print(hparams.values())
    # Imported here to avoid a circular import at module load time
    # (presumably — confirm against the project layout).
    import train
    train.train(hparams)
def steepest_hill_climbing(problem, depart, args=(math.inf), log=None):
    """Steepest-ascent hill climbing from `depart`, bounded by max_depl moves.

    :param problem: exposes meilleur_voisin_of_sol(solution)
    :param depart: starting solution (must support meilleur_que / tostr)
    :param args: max_depl, either as a bare number or as a 1-tuple
    :param log: optional Log; a fresh one is created when None
    :returns: (depart, nb_depl, local_optimum)
    """
    # Bug fix: the default `(math.inf)` is NOT a tuple — it is just math.inf —
    # so the original bare assignment `(max_depl) = args` broke for callers
    # that pass a real 1-tuple (the loop guard then compared int < tuple).
    # Accept both forms, backward-compatibly.
    if isinstance(args, tuple):
        (max_depl,) = args
    else:
        max_depl = args
    if log is None:
        log = Log()
    log.write("START Steepest Hill Climbing", 2)
    log.write("MAX_DEPL = %s" % str(max_depl), 3)
    log.write(problem, 3)
    s = depart
    nb_depl = 0
    optimum_found = False
    # Climb until no strictly-better neighbor exists or the move budget runs out.
    while nb_depl < max_depl and not optimum_found:
        meilleur_voisin = problem.meilleur_voisin_of_sol(s)
        if meilleur_voisin is not None and meilleur_voisin.meilleur_que(s):
            log.write("step %d :\t %s" % (nb_depl, s.tostr()), 5)
            nb_depl += 1
            s = meilleur_voisin
        else:
            optimum_found = True
    log.write("END Steepest Hill Climbing", 2)
    log.write("Depart : %s" % depart.tostr(), 3)
    log.write("Optimum local : %s" % s.tostr(), 3)
    log.write("Trouvé en %d déplacements" % nb_depl, 3)
    return (depart, nb_depl, s)
class MyTestCase(unittest.TestCase):
    """
    The base class for all test cases: shared logger, Chrome WebDriver
    lifecycle (setUp/tearDown) and a static logging helper.
    """
    success = "SUCCESS "
    fail = "FAIL "
    logger = Log()

    def setUp(self):
        self.logger = Log()
        self.logger.info(
            '############################### START ###############################'
        )
        # chrome_options = Options()
        # chrome_options.add_argument('--headless')
        # self.driver = webdriver.Chrome(options=chrome_options, executable_path=driver_path + "\\" + "chromedriver.exe")
        self.driver = webdriver.Chrome(driver_path + "\\" + "chromedriver.exe")
        self.driver.maximize_window()
        self.driver.set_window_size(1920, 1080)
        self.driver.implicitly_wait(30)

    def tearDown(self):
        time.sleep(2)
        # self.driver.quit()
        self.logger.info(
            '############################### END ###############################'
        )

    @staticmethod
    def my_print(msg):
        # Bug fix: the original referenced the bare name `logger`, a NameError
        # unless a module-level logger exists; a staticmethod must name the
        # class attribute explicitly.
        MyTestCase.logger.info(msg)
def Run():
    """Deploy the Agent VM onto an ESXi host with ovftool, driven through a
    jump VM taken from the testbed info file."""
    cur_dir = os.getcwd()
    logger = Log(filename="{}.log".format("deploy_agent_vm"),
                 log_dir=cur_dir,
                 console_output=True)
    run_dir = "%s/%s" % (LOG_DIR, DEFAULT_RUN_NAME)
    testbed_info = check_file_and_load_data("%s/%s" %
                                            (run_dir, SERVER_TESTBED_INFO))
    # read host information from testbedInfo.json and update runlist.json
    # deploy AgentVM to one ESXi
    # use the other two as test bed
    jump_vm_ip = testbed_info['genericVm'][0]['ip']
    #easy_ssh(logger=logger.log, lin_jump=jump_vm_ip
    logger.log.info('to be installed vm %s from ova %s' %
                    (AGENT_VM_NAME, AGENT_OVA_PATH))
    ovfTool = OVFToolDeployment(Log=logger.log,
                                username=AGENT_HOST_USR,
                                targetHost=AGENT_HOST_IP,
                                password=AGENT_HOST_PWD,
                                filename=AGENT_OVA_PATH,
                                defaults=True)
    ovfTool.SetOption("name", AGENT_VM_NAME)
    ovfTool.SetNetwork(AGENT_NETWORK)
    ovfTool.SetDatastore(AGENT_DATASTORE)
    cmd = ovfTool.Deploy()
    #cmd = "sshpass -p \'%s\' ssh %s@%s %s" % (LIN_JUMP_PWD, LIN_JUMP_USR, jump_vm_ip, cmd)
    # NOTE(review): the Deploy() command built above is immediately discarded
    # and replaced by a bare "ovftool" invocation over ssh — confirm this is
    # intentional and not a leftover from debugging (cf. the commented line).
    cmd = "sshpass -p \'%s\' ssh %s@%s ovftool" % (LIN_JUMP_PWD, LIN_JUMP_USR,
                                                   jump_vm_ip)
    logger.log.info('Deploy Agent VM command <%s>' % cmd)
    (rt, out, err) = run_local_sh_cmd(cmd)
    logger.log.info("%s %s %s " % (rt, out, err))
def initialize_variables(self, dataset):
    """Reset layer registries, per-layer-type counters and training/diagnostic
    state, and attach fresh Log/AD_Log objects for `dataset`."""
    self.all_layers, self.trainable_layers = (), ()
    # Every per-layer-type counter starts at zero.
    for counter in ('n_conv_layers', 'n_dense_layers', 'n_relu_layers',
                    'n_leaky_relu_layers', 'n_bn_layers', 'n_norm_layers',
                    'n_abs_layers', 'n_maxpool_layers', 'n_upscale_layers',
                    'n_dropout_layers', 'n_dimshuffle_layers',
                    'n_reshape_layers'):
        setattr(self, counter, 0)
    self.R_init = 0
    self.learning_rate_init = Cfg.learning_rate.get_value()
    self.it = 0
    self.clock = 0
    # Set to True after pretraining so dictionary initialization is not repeated.
    self.pretrained = False
    # Empty dictionary that will collect diagnostics during training.
    self.diag = {}
    self.log = Log(dataset_name=dataset)
    self.ad_log = AD_Log()
    self.dense_layers, self.conv_layers = [], []
def save_args(self, args):
    """Write every attribute of the argparse namespace to <save_root>/args.txt,
    one "--name value \\" line per argument."""
    lines = ['--%s %s \\\n' % (name, value)
             for name, value in args.__dict__.items()]
    args_path = os.path.join(self.save_root, 'args.txt')
    args_logger = Log(args_path)
    args_logger.write(''.join(lines), end='', add_time=False)
def __init__(self,
             model_name='test',
             model=None,
             device=torch.device('cuda:0')):
    """Set up the save folder, logger, loss and optimizer slot for this model."""
    super().__init__(model_name, model, device)
    self.model_name = model_name
    self.model = model
    self.model_folder = './save/%s/' % model_name
    # Optimizer is created later, once training parameters are known.
    self.optimizer = None
    self.loss_f = nn.CrossEntropyLoss()
    self.logger = Log(model_name,
                      model_folder=self.model_folder,
                      overwrite=False)
def get_log(self):
    '''Configure and return the sentiment logger.'''
    sentiment_log = Log(__name__, 'logs/sentiment.log')
    return sentiment_log.create_log()
def test_task_category():
    """Run the category task once in test mode with a fresh (truncated) log."""
    category_log = Log(filename=u'test_task_category.log',
                       name='test_task_category',
                       mode='w')
    task_category(logger=category_log.Logger, is_test=True)
def test_task_download(is_test=True):
    """Start every download worker, then block until all of them finish."""
    download_log = Log(filename='download.log', name=u'download')
    workers = task_download(logger=download_log.Logger, is_test=is_test)
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def __init__(self, file=None):
    """Load an INI configuration file.

    :param file: path to the config file; defaults to config.ini in the
        current working directory when None/empty.
    """
    if not file:
        # os.path.join instead of "+ '/config.ini'": portable path separator.
        file = os.path.join(os.path.abspath(os.curdir), "config.ini")
    self.log = Log().handle(__name__)
    self.log.info(f"Config file: {file}")
    # allow_no_value tolerates keys without "= value".
    self.config = configparser.ConfigParser(allow_no_value=True)
    self.config.read(file)
def test_task_combine(t_n=4, is_test=True, is_cover=False):
    """Start `t_n` combine workers, then block until all of them finish."""
    combine_log = Log(filename='combine.log', name=u'combine')
    workers = task_combine(logger=combine_log.Logger,
                           t_n=t_n,
                           is_test=is_test,
                           is_cover=is_cover)
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def test_task_book(is_test=True):
    """Start every book worker, then block until all of them finish."""
    book_log = Log(filename='book.log', name=u'book')
    workers = task_book(logger=book_log.Logger, is_test=is_test)
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def __init__(self, baseline):
    '''
    Constructor: store the (whitespace-stripped) baseline name, then fetch
    and validate the configuration via the project helpers.
    '''
    self.logger = Log().getLogger("ParseConfig")
    self.baseline = baseline.strip()
    # Filled in by fetch_config() — presumably the externals definition;
    # confirm against fetch_config's implementation.
    self.externals = ""
    self.fetch_config()
    self._check_config()
def test_task_item(is_test=True):
    """Start every item worker, then block until all of them finish."""
    item_log = Log(filename='item.log', name=u'item')
    workers = task_item(logger=item_log.Logger, is_test=is_test)
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def __init__(self, _file_path="", verbose=True):
    """Remember the dataset path; attach a Log only when verbose."""
    self.file_path = _file_path
    self.verbose = verbose
    if self.verbose:
        self.log = Log()
    # Class statistics start at zero until the dataset is scanned.
    self.total_class = 0
    self.sample_by_class = 0
def __init__(self, model_name='test', model: nn.Module = None):
    """Wire up the classifier harness: save folder, optimizer slot, logger,
    cross-entropy loss and best-accuracy tracker.

    Bug fix: the original signature read ``model_name: 'test'`` — the string
    'test' was used as a (meaningless) annotation.  The sibling class spells
    it ``model_name='test'``, a default value, which is what was intended;
    ``model`` gets a None default so the signature stays legal.
    """
    super().__init__(model_name, model, config.device)
    self.model_name = model_name
    self.model_folder = './save/%s/' % model_name
    self.model = model.to(config.device)
    # Optimizer is created later, once training parameters are known.
    self.optimizer = None
    self.logger = Log(model_name,
                      model_folder=self.model_folder,
                      overwrite=False)
    self.loss_f = nn.CrossEntropyLoss()
    self.best_acc = 0
def prepare_devices(self):
    """Configure and warm up the scanner cameras.

    :returns: {'status': 1} on success, {'status': -1} on any failure.
    """
    try:
        self.scanner_service.set_camera_config()
        self.scanner_service.prepare_cams()
    except Exception:
        # Bug fix: the bare ``except:`` also swallowed KeyboardInterrupt /
        # SystemExit; Exception keeps the best-effort behavior without that.
        print("Unexpected error:", sys.exc_info()[0])
        log = Log()
        # Bug fix: the message named the wrong method
        # ('set_new_project_config', copied from elsewhere).
        log.log_error('Error in the method prepare_devices')
        return {'status': -1}
    return {'status': 1}
def Run():
    """Deploy a VCSA into ESX, logging progress to <script-name>.log in the
    current working directory."""
    cur_dir = os.getcwd()
    helper = Log(filename="{}.log".format(os.path.basename(__file__)),
                 log_dir=cur_dir,
                 console_output=True)
    # Bug fix: the original logged through an undefined global `args`
    # (NameError at runtime) while the `helper` Log created above was never
    # used; route all logging through helper.log (the pattern the sibling
    # deployment script uses).
    helper.log.info("start deploying VCSA into ESX.")
    cmd = "pwd"
    rt, out, err = run_local_sh_cmd(cmd)
    helper.log.info("need figure out a path again %s %s %s ." % (rt, out, err))
    helper.log.info("end of deploying VCSA into ESX.")
def log_model(self, model):
    """Write the model's repr and its total parameter count to
    <save_root>/model.txt."""
    model_logger = Log(os.path.join(self.save_root, 'model.txt'))
    num_params = sum(param.numel() for param in model.parameters())
    model_logger.write(log=str(model), add_time=False)
    model_logger.write(log='Total number of parameters: %d' % num_params,
                      add_time=False)
def main():
    """Train PPO on CartPole-v1 with epsilon-greedy exploration, logging to
    wandb and checkpointing every 500 episodes."""
    env = gym.make('CartPole-v1')
    model = PPO()
    score = 0.0
    print_interval = 20
    log = Log(__file__[:-3])  # strip ".py" so the log is named after this script
    wandb.init(project="dgdfhdfh")
    wandb.watch(model)
    for n_epi in range(600):
        s = env.reset()
        done = False
        # Linearly decay exploration noise with episodes, floored at 0.01.
        # NOTE(review): reads the module-level `args` namespace — confirm it
        # is defined before main() runs.
        epsilon = max(0.01, args.epsilon - 0.01 * (n_epi / 200))
        loss_temp = 0.0
        while not done:
            # Collect up to T_horizon transitions before each training step.
            for t in range(args.T_horizon):
                prob = model.pi(torch.from_numpy(s).float())
                m = Categorical(prob)
                a = m.sample().item()
                # epsilon-greedy: sometimes override the sampled action with
                # a uniformly random one (CartPole's action space is {0, 1}).
                coin = random.random()
                if coin < epsilon:
                    a = random.randint(0, 1)
                s_prime, r, done, info = env.step(a)
                # Reward is scaled by 1/100 before storage — presumably to
                # keep returns small; confirm against PPO.put_data's contract.
                model.put_data(
                    (s, a, r / 100.0, s_prime, prob[a].item(), done))
                s = s_prime
                score += r
                if done:
                    break
            loss_temp += model.train_net()
        if n_epi % print_interval == 0 and n_epi != 0:
            log.info("episode :{}, avg score : {:.1f}".format(
                n_epi, score / print_interval))
            wandb.log({
                'score': score / print_interval,
                'epsilon': epsilon,
                'loss': loss_temp / print_interval
            })
            score = 0.0
        if n_epi % 500 == 0 and n_epi != 0:
            saved_model_name = 'ppo' + str(n_epi) + '.pt'
            torch.save(model.state_dict(),
                       os.path.join(wandb.run.dir, saved_model_name))
    env.close()
def setUp(self):
    """Pull endpoint/signing values from settings and prepare the
    balance-API fixtures for each test."""
    # self.uri = BaseConfig().get_base_info().get("URI")
    # self.signId = BaseConfig().get_base_info().get("KEY")
    # self.customerId = BaseConfig().get_base_info().get("CUSTOMERID")
    cfg = setting.get_setting()
    self.setting = cfg
    self.uri, self.signId, self.customerId = (cfg['URI'], cfg['KEY'],
                                              cfg['CUSTOMERID'])
    self.port = 'account/balance'
    self.method = 'GET'
    self.log = Log("TestAccountBalance").print_log()
def __init__(self):
    """Initialize the interactive shell: prompt, logger and module registry."""
    Shell.__init__(self)
    self.update_prompt("~")
    # Directory this file lives in; used to locate bundled modules.
    self.abspath = os.path.dirname(os.path.abspath(__file__))
    self.log = Log()
    self.modules = {}
    self.module_names = []
    self.initiate_modules()
    # No module selected until the user picks one.
    self.module = None