def match(self):
    method = request.method
    url = request.path
    # Exact-match lookup in the static routes first.
    static_func = self.static_routes[method].get(url)
    if static_func is not None:
        return static_func()
    # Dynamic routes: (pattern, arg_names) tuples mapped to handlers.
    for pattern_arg, dynamic_func in self.dynamic_routes[method].items():
        s = re.compile(pattern_arg[0]).match(url)
        if s is not None:
            params = make_list(s.groups())
            args = pattern_arg[1]
            # Cast captured path segments according to the arg-name suffix.
            for index, arg in enumerate(args):
                if arg.endswith('_int_'):
                    params[index] = int(params[index])
                if arg.endswith('_float_'):
                    params[index] = float(params[index])
            return dynamic_func(*params)
    # URL exists under another HTTP method: 405 Method Not Allowed.
    for met, route in self.static_routes.items():
        if met == method:
            continue
        if route.get(url) is not None:
            raise not_allowed()
    for met, route in self.dynamic_routes.items():
        if met == method:
            continue
        for pair in route.keys():
            if re.compile(pair[0]).match(url) is not None:
                raise not_allowed()
    # No route matched at all: 404 Not Found.
    raise not_found()
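# Every snippet in this section calls a make_list helper, but its definition
# is never shown, and the signatures differ between projects (the crawler
# snippets below pass file_path/index/blank and read from a file; most of the
# others just coerce a value). A minimal sketch of the coercion variant, as
# an assumption only:
def make_list(value):
    """Hypothetical helper: coerce a scalar or sequence into a plain list."""
    if value is None:
        return []
    if isinstance(value, list):
        return value
    if isinstance(value, (tuple, set)):
        return list(value)
    return [value]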
def getTasks(self, tags=(), without_tags=(), show='open'):
    # Tuple defaults avoid the shared mutable-default pitfall; make_list
    # coerces them (and scalar tags) to lists.
    assert show in ('all', 'open', 'closed')
    query = Task.query
    # Require every tag in `tags` to be attached to the task.
    tags = make_list(tags)
    for tag in tags:
        query = query.filter(Task.tags.any(title=tag))
    # Exclude tasks carrying any tag in `without_tags`.
    without_tags = make_list(without_tags)
    for tag in without_tags:
        query = query.filter(not_(Task.tags.any(title=tag)))
    if show != 'all':
        query = query.filter_by(done=(show == 'closed'))
    return query.all()
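# Hypothetical usage of getTasks; `tasklist` stands in for whatever object
# exposes the method (not shown in the source):
open_urgent = tasklist.getTasks(tags='urgent')  # scalar tag is coerced
filtered = tasklist.getTasks(tags=['urgent', 'home'],
                             without_tags='someday', show='open')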
def construct_seed(serise_id, brand, serise, brand_id, p_type):
    """Seed assembly: build one crawl seed per (city, year, month)."""
    # First determine the current year and month.
    current_year = datetime.datetime.today().strftime("%Y")
    current_month = datetime.datetime.today().strftime("%m")
    # City list and year list (years from newest to oldest).
    city_list = make_list(city_list_file, index=1, blank=blank)
    year_list = [i for i in range(2000, int(current_year) + 1)]
    year_list.reverse()
    for year in year_list:
        if year != int(current_year):
            # Past years: pick one random month from each quarter.
            for m in [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]:
                city = random.choice(city_list)
                month = str(random.choice(m))
                url = seed_url.format(city, serise_id,
                                      ''.join([str(year), month]))
                insert_seed_save(url, brand, serise, city,
                                 str(year) + '-' + month,
                                 brand_id, serise_id, p_type)
        else:
            # Current year: every month up to the current one.
            for m in range(1, int(current_month) + 1):
                city = random.choice(city_list)
                month = str(m)
                url = seed_url.format(city, serise_id,
                                      ''.join([str(year), month]))
                insert_seed_save(url, brand, serise, city,
                                 str(year) + '-' + month,
                                 brand_id, serise_id, p_type)
    return
def getTagsRelated(self, tags):
    tags = make_list(tags)
    # Tasks that carry at least one of the given tags...
    tasks = session.query(Task.id).filter(
        Task.tags.any(Tag.title.in_(tags)))
    task_ids = [t[0] for t in tasks]
    # ...and the other tags attached to those tasks.
    new_tags = Tag.query.filter(Tag.tasks.any(Task.id.in_(task_ids))) \
        .filter(not_(Tag.title.in_(tags)))
    return new_tags.all()
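# Hypothetical usage of getTagsRelated: tags that co-occur with 'urgent'
# on some task, excluding 'urgent' itself:
related = tasklist.getTagsRelated('urgent')
print([tag.title for tag in related])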
def load_cookies_list(self):
    """Return the cookie list for the scheduler to consume;
    seeds are merged with cookies and placed on the queue.
    """
    cookie_list = make_list(file_path=config.user_info_file,
                            index='', blank='')
    return cookie_list
def temporal_block_qgru(inp, num_filters_gru=0, dropout=0):
    # Stack one bidirectional quaternion GRU per entry in num_filters_gru
    # (a scalar is coerced to a one-element list).
    for idx, nb_rnn_filt in enumerate(make_list(num_filters_gru)):
        inp = Bidirectional(
            QuaternionGRU(nb_rnn_filt, activation='tanh',
                          dropout=dropout, recurrent_dropout=dropout,
                          return_sequences=True),
            merge_mode='mul')(inp)
    return inp
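# A minimal sketch of wiring the recurrent block into a Keras model; the
# sequence length, feature size, and filter counts are illustrative
# assumptions (Input and Model imported from keras, as elsewhere in these
# snippets):
inp = Input(shape=(None, 128))  # (time steps, features)
out = temporal_block_qgru(inp, num_filters_gru=[64, 64], dropout=0.1)
model = Model(inputs=inp, outputs=out)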
def temporal_block_gru(inp, num_filters_gru=0, dropout=0, data_in=(),
                       input_data_format='channels_last'):
    # Same stacking pattern as the quaternion variant, with a plain GRU.
    # (data_in and input_data_format are accepted but unused here.)
    for idx, nb_rnn_filt in enumerate(make_list(num_filters_gru)):
        inp = Bidirectional(
            GRU(nb_rnn_filt, activation='tanh',
                dropout=dropout, recurrent_dropout=dropout,
                return_sequences=True),
            merge_mode='mul')(inp)
    return inp
def _decorator(func):
    # Register the handler once per HTTP verb in `methods`
    # (a single string is coerced to a one-element list).
    for verb in make_list(methods):
        verb = verb.upper()
        route = Route(self, rule, verb, func)
        self.add_route(route)
    return func
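# _decorator reads `self`, `rule`, and `methods` from an enclosing scope;
# a minimal sketch of that enclosing method, as an assumption about how the
# router exposes it (the enclosing code is not shown in the source):
def route(self, rule, methods='GET'):
    def _decorator(func):
        for verb in make_list(methods):
            verb = verb.upper()
            self.add_route(Route(self, rule, verb, func))
        return func
    return _decorator

# Hypothetical usage:
# @app.route('/tasks', methods=['GET', 'POST'])
# def tasks(): ...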
def script_from_yaml(filename, jobdir="../jobs/", truncate=2):
    # Read the yaml card and put its parameters in a dictionary.
    with open(filename) as f:
        card = yaml.safe_load(f)
    # New job directory if one wants to keep the files.
    if "jobdir" in card:
        jobdir = card["jobdir"]
    # Optional sub-cards; each is None when absent.
    proc_info = card.get("proc")
    param_info = card.get("param")
    run_info = card.get("run")
    output_info = card.get("output")
    # If gridpack, get grid_info and update the run card if needed.
    grid_info = card.get("gridpack")
    if grid_info is not None and grid_info["status"] == 0:
        run_info["gridpack"] = "True"
        run_info["repeat"] = 1
    decay_info = card.get("decays")
    pythia6_info = card.get("pythia6")
    pythia8_info = card.get("pythia8")
    delphes_info = card.get("delphes")
    cluster_info = card.get("cluster")
    # Make a job directory with name model_process_output.
    proc_suffix = ''
    for p in sorted(proc_info.keys()):
        proc_suffix += re.sub("[~>]", '', re.sub(r'\s', '_', proc_info[p]))
        proc_suffix = proc_suffix.replace(',', '_')
        proc_suffix += '_'
    out_dir = jobdir + proc_suffix.rstrip('_')
    call(['mkdir', '-p', out_dir])
    # Edit the cards and copy them into the job dir.
    cd.proc_card_edit(proc_info, out_dir)
    cd.run_card_edit(run_info, out_dir)
    cd.pythia6_card_edit(pythia6_info, out_dir)
    cd.move_card(pythia8_info["card"], out_dir, "pythia8")
    print(delphes_info)
    cd.move_card(delphes_info, out_dir, "delphes")
    # Get the job launch command if running on a cluster.
    cluster_command = os.environ["CLUS_LAUNCH"] \
        if cluster_info is not None else ""
    # Iterate over the cartesian product of parameters in the param card.
    param_prod = (dict(zip(param_info.keys(), x)) for x in product(
        *[make_list(param_info[k]) for k in param_info.keys()]))
    job_ids = []
    for p in param_prod:
        print(p)
        par_name = cd.param_card_edit(p, decay_info, proc_info['model'],
                                      out_dir, truncate=truncate)
        # Launch the run.
        par_name = par_name.replace("param_card_", '').replace(".dat", '')
        repeat = run_info.get("repeat", 1)
        job_id = run_events(out_dir, par_name, cluster_info, output_info,
                            pythia8_info["exec"], grid_info, repeat,
                            cluster_command)
        job_ids += job_id
    # If on a cluster, launch process-checking jobs and clean up the job
    # directory when all the jobs are done running.
    # (`executable` is defined at module level, not shown here.)
    if cluster_info is not None:
        time = cluster_info[os.environ["CLUS_TIME"]]
        submit_command = [cluster_command] + list(
            sum([['-' + c, str(cluster_info[c])] for c in cluster_info], []))
        submit_command += [executable] + ["-c"]
        jobpath = str(pathlib.Path(out_dir).resolve())
        # Put all job ids in a file.
        filename = "{}/job_ids_{}.txt".format(jobpath, str(uuid4()))
        with open(filename, 'w') as id_file:
            print('\n'.join(map(str, job_ids)), file=id_file)
        python_command = ("import check_processes as ch;"
                          "ch.cleanup_job_dir('{}', '{}', {})").format(
            jobpath, filename, time)
        submit_command += [python_command]
        print(submit_command)
        call(submit_command)
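# A hypothetical yaml card that script_from_yaml could consume; every key
# mirrors one of the card.get(...) lookups above, but the concrete values
# are illustrative assumptions, not from the source:
#
#   jobdir: "../jobs/"
#   proc:
#     model: "sm"
#     p1: "p p > t t~"
#   param:
#     MT: [172.5, 173.5]
#   run:
#     repeat: 2
#   pythia8:
#     card: "cards/pythia8_card.dat"
#     exec: "pythia8_exec"
#   delphes: "cards/delphes_card.tcl"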
def __init__(self, content, style, style_weight, denoising_weight,
             learning_rate):
    # Prepare VGG models, one per input shape.
    self.vgg_content_shaped = VGG19(include_top=False,
                                    input_shape=content.shape)
    self.vgg_style_shaped = VGG19(include_top=False,
                                  input_shape=style.shape)
    content_model = Model(
        inputs=self.vgg_content_shaped.input,
        outputs=[self.vgg_content_shaped.get_layer(layer).output
                 for layer in CONTENT_LAYERS])
    style_model_style_shaped = Model(
        inputs=self.vgg_style_shaped.input,
        outputs=[self.vgg_style_shaped.get_layer(layer).output
                 for layer in STYLE_LAYERS])
    style_model_content_shaped = Model(
        inputs=self.vgg_content_shaped.input,
        outputs=[self.vgg_content_shaped.get_layer(layer).output
                 for layer in STYLE_LAYERS])
    content = tf.constant(preprocess_input(content))
    style = tf.constant(preprocess_input(style))
    # The generated image starts from truncated-normal noise.
    self.image = tf.Variable(tf.truncated_normal(tf.shape(content)))

    # Content loss: mean squared difference of content-layer activations.
    content_activations_original = make_list(
        content_model(tf.expand_dims(content, axis=0)))
    content_activations_created = make_list(
        content_model(tf.expand_dims(self.image, axis=0)))
    self.content_loss = 0
    for original, created in zip(content_activations_original,
                                 content_activations_created):
        self.content_loss += tf.reduce_mean((original - created) ** 2)
    self.content_loss /= len(CONTENT_LAYERS)

    # Style loss: mean squared difference of normalized Gram matrices.
    style_activations_original = make_list(
        style_model_style_shaped(tf.expand_dims(style, axis=0)))
    style_activations_created = make_list(
        style_model_content_shaped(tf.expand_dims(self.image, axis=0)))
    self.style_loss = 0
    for original, created in zip(style_activations_original,
                                 style_activations_created):
        self.style_loss += tf.reduce_mean(
            (normalized_gram_matrix(original)
             - normalized_gram_matrix(created)) ** 2)
    self.style_loss /= len(style_activations_original)
    self.style_loss *= style_weight

    # Total-variation denoising loss.
    self.denoising_loss = denoising_weight * denoising_loss(self.image)
    self.loss = self.content_loss + self.style_loss + self.denoising_loss
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    self.train_step = optimizer.minimize(self.loss, var_list=[self.image])
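# A minimal sketch of driving the optimizer above, using the TF1-style
# graph/session API the snippet is written against; the class name
# StyleTransfer, the step count, and the weights are illustrative
# assumptions:
transfer = StyleTransfer(content_img, style_img,
                         style_weight=1e-2, denoising_weight=1e-4,
                         learning_rate=2.0)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        _, loss = sess.run([transfer.train_step, transfer.loss])
        if step % 100 == 0:
            print(step, loss)
    result = sess.run(transfer.image)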
# Use CPU or GPU.
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-g', type=int, default=-1,
                    help='GPU ID (negative value indicates CPU)')
parser.add_argument('--process', '-p', type=int, default=1,
                    help='number of process(es)')
args = parser.parse_args()

# Load data.
data = make_list(opt.root)

# Set up the dataset iterators.
dataset = chainer.datasets.TransformDataset(data, preprocess)
valid_dataset, train_dataset = chainer.datasets.split_dataset_random(
    dataset, 16)
if args.process > 1:
    train_iter = chainer.iterators.MultiprocessIterator(
        train_dataset, opt.batchsize, n_processes=args.process)
    valid_iter = chainer.iterators.MultiprocessIterator(
        valid_dataset, opt.batchsize, repeat=False, shuffle=False,
        n_processes=args.process)
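# The snippet only builds iterators for args.process > 1; a plausible
# single-process fallback using Chainer's SerialIterator (an assumption,
# since the original branch is not shown):
if args.process == 1:
    train_iter = chainer.iterators.SerialIterator(
        train_dataset, opt.batchsize)
    valid_iter = chainer.iterators.SerialIterator(
        valid_dataset, opt.batchsize, repeat=False, shuffle=False)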
def zapocni(self):
    s = Semafor(Sport(utils.make_list('players_home.txt')))