def get_params(key, pt_dd):
    """Build matplotlib kwargs (color/label) for *key* from the plot config."""
    mapping = (('colors', 'color'), ('labels', 'label'))
    return {dest: U.get_param(pt_dd[src], key)
            for src, dest in mapping if src in pt_dd}
def mine_transaction():
    """Mine the next block containing a single wallet transaction.

    Expects a JSON body with a hex 'address' and a decimal 'amount';
    returns the generated block as JSON, or null if no block was produced.
    """
    data = request.get_json()
    address = hex_to_bytes(get_param(data, 'address'))
    amount = Decimal(get_param(data, 'amount'))
    block = app.blockchain.generate_next_with_transaction(
        app.wallet, address, amount)
    return jsonify(block.to_raw() if block else None)
def get_params(gk, pt_dd):
    """Collect per-group plot kwargs (color, label) from the plot-type config."""
    params = {}
    for cfg_key, mpl_key in (('colors', 'color'), ('labels', 'label')):
        if cfg_key in pt_dd:
            params[mpl_key] = U.get_param(pt_dd[cfg_key], gk)
    return params
def get_params(dsetk, prop_key, pt_dd, c):
    """Plot kwargs for one property line in subplot *c*.

    A label is attached only on the first subplot (c == 0) so the legend is
    not duplicated across subplots.
    """
    params = {}
    if 'colors' in pt_dd:
        params['color'] = U.get_param(pt_dd['colors'], prop_key)
    if c == 0 and 'labels' in pt_dd:
        params['label'] = U.get_param(pt_dd['labels'], prop_key)
    return params
def send_transaction():
    """Send a transaction from the app wallet.

    Expects a JSON body with a hex 'address' and a decimal 'amount';
    returns the raw transaction as JSON, or null if sending failed.
    """
    data = request.get_json()
    print('/sendTransaction data: {}'.format(data))
    address = hex_to_bytes(get_param(data, 'address'))
    amount = Decimal(get_param(data, 'amount'))
    print('address: {}, amount: {}'.format(address, amount))
    tx = app.blockchain.send_transaction(app.wallet, address, amount)
    return jsonify(tx.to_raw() if tx else None)
def DNN_regressor(params, model_dir, feature_columns, config):
    '''Returns DNN estimator object.

    params: hyperparameter dict; 'layers', 'units' and 'learning_rate' are
    required, the remaining keys are optional (utils.get_param returns
    None/default when absent).
    '''
    hidden_units = params['layers'] * [params['units']]
    weight_column_name = utils.get_param(params, 'weight_column_name')
    optimizer = utils.get_optimizer(utils.get_param(params, 'optimizer'),
                                    params['learning_rate'])
    activation_fn = utils.get_activation(
        utils.get_param(params, 'activation_fn'))
    # BUG FIX: float(None) raised TypeError when 'dropout' was not supplied;
    # keep None (meaning "no dropout") in that case
    dropout = utils.get_param(params, 'dropout')
    if dropout is not None:
        dropout = float(dropout)
    gradient_clip_norm = utils.get_param(params, 'gradient_clip_norm')
    enable_centered_bias = False  # keep false
    feature_engineering_fn = utils.get_param(params, 'feature_engineering_fn')
    embedding_lr_multipliers = utils.get_param(params,
                                               'embedding_lr_multipliers')
    input_layer_min_slice_size = utils.get_param(params,
                                                 'input_layer_min_slice_size')
    # NOTE: 'label_keys' used to be fetched here but was never passed to the
    # estimator; the dead lookup has been removed
    return tf.contrib.learn.DNNRegressor(
        hidden_units=hidden_units,
        feature_columns=feature_columns,
        model_dir=model_dir,
        weight_column_name=weight_column_name,
        optimizer=optimizer,
        activation_fn=activation_fn,
        dropout=dropout,
        gradient_clip_norm=gradient_clip_norm,
        enable_centered_bias=enable_centered_bias,
        config=config,
        feature_engineering_fn=feature_engineering_fn,
        embedding_lr_multipliers=embedding_lr_multipliers,
        input_layer_min_slice_size=input_layer_min_slice_size)
def get_params(gk, pt_dd):
    """Errorbar plot kwargs (color, ecolor, label) for group *gk*.

    Falls back to a black error-bar color and the group key as label when
    the config does not provide them.
    """
    params = {}
    if 'colors' in pt_dd:
        params['color'] = U.get_param(pt_dd['colors'], gk)
    # BUG FIX: the original tested for the key 'ecolor' but then indexed
    # pt_dd['ecolors'], raising KeyError whenever only 'ecolor' existed;
    # test the key that is actually read
    if 'ecolors' in pt_dd:
        params['ecolor'] = U.get_param(pt_dd['ecolors'], gk)
    else:
        params['ecolor'] = 'black'
    if 'labels' in pt_dd:
        params['label'] = U.get_param(pt_dd['labels'], gk)
    else:
        params['label'] = gk
    return params
def get_params(key, pt_dd):
    """Line plot kwargs (color, label, linewidth) for *key*.

    The label falls back to the key itself when the configured lookup
    returns a falsy value.
    """
    params = {}
    if 'colors' in pt_dd:
        params['color'] = U.get_param(pt_dd['colors'], key)
    if 'labels' in pt_dd:
        # stray py2 debug statement `print pt_dd['labels'], key` removed
        v = U.get_param(pt_dd['labels'], key)
        params['label'] = v if v else key
    if 'linewidth' in pt_dd:
        params['linewidth'] = pt_dd['linewidth']
    return params
def tensor_forest(params):
    '''Returns tensorforest estimator object.

    All hyperparameters are read from *params* via utils.get_param and
    coerced to the types ForestHParams expects.
    '''
    num_classes = int(utils.get_param(params, 'num_classes'))
    num_features = int(utils.get_param(params, 'num_features'))
    # NOTE(review): if the stored value is the *string* 'False', bool() still
    # yields True -- confirm 'regression' is stored as an actual boolean
    regression = bool(utils.get_param(params, 'regression'))
    num_trees = int(utils.get_param(params, 'num_trees'))
    max_nodes = int(utils.get_param(params, 'max_nodes'))
    tensor_forest_params = tf.contrib.tensor_forest.python.tensor_forest.ForestHParams(
        num_classes=num_classes,
        num_features=num_features,
        regression=regression,
        num_trees=num_trees,
        max_nodes=max_nodes)
    return tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator(
        tensor_forest_params)
def imap(data, A, C, **kw):
    """imap: interaction map.

    Plot one pcolormesh panel per group key of *data* on an ImageGrid,
    optionally denormalizing each map by a per-group denominator from the
    plot-type config; all panels share one color scale and one colorbar.
    """
    logger.info('start plotting interaction map...')
    pt_dd = U.get_pt_dd(C, A.property, A.plot_type)
    fig = plt.figure(figsize=(12, 9))
    col, row = U.gen_rc(len(data.keys()), pt_dd)
    grid = ImageGrid(fig, 111, nrows_ncols=(row, col), axes_pad=0.3,
                     add_all=True, label_mode="L")
    if 'denorminators' in pt_dd:
        for gk in data:
            dn = U.get_param(pt_dd['denorminators'], gk)
            logger.info('denorminator: {0}'.format(dn))
            data[gk] = data[gk] / dn
    # used to set up the reference point and the range of color bar
    max_ = get_max(data)
    for k, gk in enumerate(data.keys()):
        ax = grid[k]
        da = data[gk]
        rda = da
        logger.info('map max: {0}; map min: {1}'.format(rda.max(), rda.min()))
        # JUST FOR REFERENCE OF SEEING WHERE THE END POINTS ARE, DEBUGGING USES
        # rda[-1][-1] = max_
        # rda[0][-1] = max_
        # sophisticated reversal to make x axis donor, y axis acceptor
        # rda = np.array([i[::-1] for i in da.transpose()[::-1]])
        # sophisticated reversal to make x axis acceptor, y axis donor
        # rda = np.array([i[::-1] for i in da[::-1]])
        params = get_params(gk, pt_dd)
        logger.info(params)
        # cmap_options: hot, gist_heat, Orange (printer friendly)
        # remove the info about the Hbonding information about first residue,
        # which ACE, this make the final map easier to understand
        rda = np.delete(np.delete(rda, 0, 0), 0, 1)
        im = ax.pcolormesh(rda, **params)
        if 'clim' in pt_dd:
            im.set_clim(**pt_dd['clim'])
        else:
            im.set_clim(0, max_)
        logger.info('shape after removal of the 0th residue: {0}'.format(
            rda.shape))
        ax.set_xlim([0, rda.shape[0]])
        ax.set_ylim([0, rda.shape[1]])
        ax.minorticks_on()
        decorate_ax(ax, pt_dd, gk)
    # im is the mesh of the last panel; clim is shared so one colorbar suffices
    plt.colorbar(im, shrink=.5, orientation='vertical', anchor=(1.3, 0))
    plt.savefig(U.gen_output_filename(A, C), **pt_dd.get('savefig', {}))
def calc_alx(h5, gk, grp, prop_obj, prop_dd, A, C): grp_tb = fetch_grp_tb(h5, grp, prop_obj.name) # x assumed to be FIELD_0_NAME tb0 = grp_tb[0] xf = tb0._f_getAttr('FIELD_0_NAME') # xf: xfield, and it's assumed to be # the same in all tabls in the grp_tb min_len = min(tb.read(field=xf).shape[0] for tb in grp_tb) _l = [] ref_col = grp_tb[0].read(field=xf)[:min_len] for tb in grp_tb: col1 = tb.read(field=xf)[:min_len] assert (col1 == ref_col).all() == True col2 = tb.read(field=prop_obj.ifield)[:min_len] _l.append(col2) _a = np.array(_l) y = _a.mean(axis=0) ye = np.array([U.sem(_a[:,i]) for i in xrange(len(_a[0]))]) # ye = stats.sem(_a, axis=0) if 'xdenorm' in prop_dd: ref_col = ref_col / float(prop_dd['xdenorm']) if 'denorminators' in prop_dd: denorm = float(U.get_param(prop_dd['denorminators'], gk)) y, ye = y / denorm, ye / denorm _aa = np.array([ref_col, y, ye]) prop_dd = U.get_prop_dd(C, prop_obj.name) # nb_blave: number of blocks for block averaging n = int(prop_dd.get('nb_blave', 100)) res = block_average(_aa, n) return res
def decorate_ax(ax, gk, pt_dd, ncol, nrow, c):
    """Decorate subplot *c* of an ncol x nrow grid of phi/psi maps.

    Only bottom-row subplots keep an xlabel, only first-column subplots keep
    a ylabel; grid, limits, scale and title come from the config with
    Ramachandran-style defaults.
    """
    # bottom row iff c >= ncol * nrow - ncol
    if c < (ncol * nrow - ncol):
        ax.set_xticklabels([])
        # ax.get_xaxis().set_visible(False)  # this hide the whole axis
    else:
        if 'xlabel' in pt_dd:
            # NOTE(review): the configured value is ignored; label hard-coded
            ax.set_xlabel('$\phi$')
    if c % ncol == 0:
        if 'ylabel' in pt_dd:
            ax.set_ylabel('$\psi$')  # NOTE(review): hard-coded, like xlabel
    else:
        # non-first-column subplots hide y tick labels
        ax.set_yticklabels([])
    if 'grid' in pt_dd:
        ax.grid(**pt_dd['grid'])
    else:
        ax.grid(which='both')
    if 'xlim' in pt_dd:
        ax.set_xlim(**pt_dd['xlim'])
    else:
        ax.set_xlim([-180, 180])
    if 'ylim' in pt_dd:
        ax.set_ylim(**pt_dd['ylim'])
    else:
        ax.set_ylim([-180, 180])
    if 'xscale' in pt_dd:
        ax.set_xscale(**pt_dd['xscale'])
    if 'titles' in pt_dd:
        ax.set_title(U.get_param(pt_dd['titles'], gk))
def basketball(request):
    """History page: basketball games in a date range, optionally filtered
    by league name (the team filter is currently disabled)."""
    params = {}
    start_date, end_date = parse_date_range(request)
    league_name = get_param(request, 'league_name', '')
    team = get_param(request, 'team', '')
    # echo the filter values back to the template
    params['start_date'] = start_date
    params['end_date'] = end_date
    params['league_name'] = league_name
    params['team'] = team
    games = db_utils.get_all_basketball_games().filter(
        datetime__gte=start_date, datetime__lte=end_date)
    if league_name != '':
        games = games.filter(league_name__contains=league_name)
    # if team != '':
    #     games = games.filter(team__contains=team)
    params['games'] = games
    return render_to_response("history/basketball.html", params,
                              context_instance=RequestContext(request))
def get_params(gk, pt_dd, i):
    """Per-subplot plot kwargs: config keys are suffixed with subplot index
    *i* (colors{i}, markers{i}, labels{i}); label falls back to the group key.
    """
    params = {}
    for cfg, mpl in (('colors', 'color'), ('markers', 'marker')):
        cfg_key = '{0}{1}'.format(cfg, i)
        if cfg_key in pt_dd:
            params[mpl] = U.get_param(pt_dd[cfg_key], gk)
    label_key = 'labels{0}'.format(i)
    params['label'] = (U.get_param(pt_dd[label_key], gk)
                       if label_key in pt_dd else gk)
    return params
def api_statements(request, count=None): """A view of a JSON-serialized, reverse-chronologically-ordered set of statements. """ if not count: raise Http404 count = count.isdigit() and int(count) or 0 offset = get_numeric_param(request, "offset") or 0 tag = get_param(request, "tag") if tag: qs = Statement.objects.published().filter( tag__slug=tag ) else: qs = Statement.objects.published() statements = qs.only('id', 'text', 'tag')[offset:(offset + count)] if len(statements) == 0: raise Http404, "Offset is too large." statements_new_keys = [] for statement in statements: statements_new_keys.append(dict( id=statement.id, statement=statement.text, tag=[statement.tag.slug, statement.tag.tag, statement.tag.color] )) statements = simplejson.dumps(statements_new_keys, ensure_ascii=False) return HttpResponse(statements, mimetype="application/json")
def mp_alx(data, A, C, **kw):
    """alx for multiple properties (mp).

    One subplot per system key; within each subplot every property's data is
    drawn as a line plus a shaded +/- error band.
    """
    pt_dd = U.get_pt_dd(C, '_'.join(A.properties), A.plotmp_type)
    dsets = grp_datasets(data, pt_dd)
    fig = plt.figure(figsize=(12,9))
    if 'subplots_adjust' in pt_dd:
        fig.subplots_adjust(**pt_dd['subplots_adjust'])
    ncol, nrow = U.gen_rc(len(dsets.keys()), pt_dd)
    logger.info('Chosen # of cols: {0}, # of rows; {1}'.format(ncol, nrow))
    for c, sys_key in enumerate(dsets.keys()):
        ax = fig.add_subplot(nrow, ncol, c+1)
        for prop_key in dsets[sys_key]:
            # da rows: x, y, yerr -- assumed; verify against grp_datasets
            da = dsets[sys_key][prop_key]
            params = get_params(sys_key, prop_key, pt_dd, c)
            ax.plot(da[0], da[1], **params)
            ax.fill_between(da[0], da[1]-da[2], da[1]+da[2], where=None,
                            facecolor=params.get('color'), alpha=.3)
        if 'texts' in pt_dd:
            ax.text(**U.get_param(pt_dd['texts'], sys_key))
        decorate_ax(ax, pt_dd, ncol, nrow, c)
    plt.savefig(U.gen_output_filename(A, C), **pt_dd.get('savefig', {}))
def mp_alx(data, A, C, **kw):
    """alx for multiple properties (mp).

    Draws one subplot per system key; each property in that system is
    plotted as a line with a translucent +/- error band around it.
    """
    pt_dd = U.get_pt_dd(C, '_'.join(A.properties), A.plotmp_type)
    dsets = grp_datasets(data, pt_dd)
    fig = plt.figure(figsize=(12, 9))
    if 'subplots_adjust' in pt_dd:
        fig.subplots_adjust(**pt_dd['subplots_adjust'])
    ncol, nrow = U.gen_rc(len(dsets.keys()), pt_dd)
    logger.info('Chosen # of cols: {0}, # of rows; {1}'.format(ncol, nrow))
    for c, sys_key in enumerate(dsets.keys()):
        ax = fig.add_subplot(nrow, ncol, c + 1)
        for prop_key in dsets[sys_key]:
            # da rows: x, y, yerr -- assumed; verify against grp_datasets
            da = dsets[sys_key][prop_key]
            params = get_params(sys_key, prop_key, pt_dd, c)
            ax.plot(da[0], da[1], **params)
            ax.fill_between(da[0], da[1] - da[2], da[1] + da[2], where=None,
                            facecolor=params.get('color'), alpha=.3)
        if 'texts' in pt_dd:
            ax.text(**U.get_param(pt_dd['texts'], sys_key))
        decorate_ax(ax, pt_dd, ncol, nrow, c)
    plt.savefig(U.gen_output_filename(A, C), **pt_dd.get('savefig', {}))
def initialize():
    """ Reset the driver and connection.

    Creates a Firefox driver with the silent-download profile, logs in with
    credentials from the parameter store and returns the driver.
    """
    profile = webdriver.FirefoxProfile()
    set_download_profile(profile)
    driver = webdriver.Firefox(firefox_profile=profile)
    # login
    loginurl = utils.get_param('loginurl')
    driver.get(loginurl)
    fill_field(driver, "input", "id", "username", utils.get_param('username'))
    fill_field(driver, "input", "id", "password", utils.get_param('password'))
    click_validate(driver, "button", "name", "_eventId_proceed")
    print("login successful")
    return (driver)
def decorate_ax(ax, pt_dd, gk):
    """Apply axis labels, title and grid from the plot-type config; the
    title defaults to the group key when no title mapping exists."""
    for cfg_key, setter in (('xlabel', ax.set_xlabel),
                            ('ylabel', ax.set_ylabel)):
        if cfg_key in pt_dd:
            setter(**pt_dd[cfg_key])
    title = U.get_param(pt_dd['titles'], gk) if 'titles' in pt_dd else gk
    ax.set_title(title)
    if 'grid' in pt_dd:
        ax.grid(**pt_dd['grid'])
def run_svd(data, params, svdpp=False):
    '''Returns trained SVD model based on matrix factorization.

    With svdpp=True an SVD++ model is trained instead; both share the same
    factor/epoch/learning-rate/regularization hyperparameters.
    '''
    shared = dict(n_factors=utils.get_param(params, 'n_factors'),
                  n_epochs=utils.get_param(params, 'n_epochs'),
                  lr_all=utils.get_param(params, 'learning_rate'),
                  reg_all=utils.get_param(params, 'reg'),
                  verbose=True)
    if svdpp:
        alg = SVDpp(**shared)
    else:
        alg = SVD(biased=utils.get_param(params, 'biased'), **shared)
    alg.fit(data)
    return alg
def calc_means(h5, gk, grp, prop_obj, prop_dd, A, C):
    """Mean and SEM of the per-table means of prop_obj.ifield for group *gk*,
    optionally divided by a per-group denominator from prop_dd."""
    grp_tb = fetch_grp_tb(h5, grp, prop_obj.name)
    table_means = [tb.read(field=prop_obj.ifield).mean() for tb in grp_tb]
    if 'denorminators' in prop_dd:
        denorm = float(U.get_param(prop_dd['denorminators'], gk))
        logger.info('denormator: {0}'.format(denorm))
        return np.array([np.mean(table_means) / denorm,
                         U.sem(table_means) / denorm])
    return np.array([np.mean(table_means), U.sem(table_means)])
def get_repos_info(self, org_name, repo_name, contains, start):
    """get repository info

    Fetches the repo resource and prints a tab-separated line; the optional
    *contains*/*start* filter values are echoed when given.
    """
    repo_url = self.baseurl + "/repos/" + org_name.lower() + "/" + repo_name
    response = requests.get(repo_url, params=utils.get_param(1),
                            headers=self.headers)
    if response is None:
        logging.error("url does not exist")
        return
    if contains is None and start is None:
        print(org_name + "\t" + repo_name)
    else:
        # BUG FIX: when exactly one of contains/start was None the string
        # concatenation raised TypeError; print an empty field instead
        print(org_name + "\t" + repo_name + "\t" + (contains or '') +
              "\t" + (start or ''))
def draw_loop(start, end, step, x_ptr, y_ptr):
    """Sweep the drawing parameter from *start* to *end* in *step*
    increments, plotting each computed point in the global Color (black
    dot for unknown colors)."""
    global Color
    fmt_for_color = {'RED': 'r.', 'GREEN': 'g.', 'BLUE': 'b.'}
    set_param(start)
    while get_param() <= end:
        x, y = cal_coord(x_ptr, y_ptr)
        plt.plot(x, y, fmt_for_color.get(Color, 'k.'))
        change_param(step)
def get_orgs_info(self, org_name, contains, start):
    """get organization info

    Prints the org login/url, then pages through its repositories and prints
    those whose name (last path segment of human_name) matches the optional
    *contains* / *start* filters, followed by the match count.
    """
    orgs_url = self.baseurl + "/orgs/" + org_name
    response = requests.get(orgs_url, params=utils.get_param(1),
                            headers=self.headers)
    temp = response.json()
    print("orgs:\t" + temp.get("login"))
    print("url:\t" + temp.get("url"))
    print("=================================================")
    reps_url = orgs_url + "/repos"
    response = requests.get(reps_url, params=utils.get_param(1),
                            headers=self.headers)
    # the API reports the page count in a response header (string)
    total_page = response.headers["total_page"]
    page = 1
    count = 0
    while page <= int(total_page):
        response = requests.get(reps_url, params=utils.get_param(page),
                                headers=self.headers)
        page += 1
        for each in response.json():
            print_each = True
            # repo display name = last path segment of human_name
            temp = each.get("human_name").split("/")[-1]
            if contains is not None:
                print_each = utils.is_contains(temp, contains)
            if print_each and start is not None:
                print_each = utils.is_start_with(temp, start)
            if print_each:
                print(temp + "\n\t" + each.get("url"))
                count += 1
    print("=================================================")
    print("Find " + str(count) + " satisfied")
def get_params(gk, pt_dd):
    """Build contourf kwargs (cmap, levels) for group *gk*.

    A single cmap name is shared by all groups (the per-group lookup was
    deliberately disabled); values above vmax render white.
    """
    params = {}
    if 'cmaps' in pt_dd:
        # params['cmap'] = getattr(cm, U.get_param(pt_dd['cmaps'], gk))
        cmap = getattr(cm, pt_dd['cmaps'])
        cmap.set_over('white')
        params['cmap'] = cmap
    if 'levels' in pt_dd:
        level_spec = U.get_param(pt_dd['levels'], gk)
        if level_spec:
            min_, max_, step = level_spec
            # +1 so that max_ will be included in the final levels
            params['levels'] = range(min_, max_ + 1, step)
    # the potential energy map usually does not need color and label decoration
    return params
def get_params(gk, pt_dd):
    """Build contourf kwargs (cmap, levels) for group *gk*."""
    params = {}
    if 'cmaps' in pt_dd:
        # params['cmap'] = getattr(cm, U.get_param(pt_dd['cmaps'], gk))
        # one cmap name shared by all groups; values above vmax render white
        params['cmap'] = getattr(cm, pt_dd['cmaps'])
        params['cmap'].set_over('white')
    if 'levels' in pt_dd:
        _ = U.get_param(pt_dd['levels'], gk)
        if _:
            min_, max_, step = _
            # +1 so that max_ will be included in the final levels
            params['levels'] = range(min_, max_ + 1, step)
    # the potential energy map usually does not need color and label decoration
    return params
def set_download_profile(profile):
    """Configure a Firefox profile so CSV/plain-text files download silently
    into the configured directory, without any prompts or manager windows."""
    # folderList=2: download into the custom directory set below
    profile.set_preference('browser.download.folderList', 2)
    profile.set_preference('browser.download.manager.showWhenStarting', False)
    profile.set_preference('browser.download.dir',
                           utils.get_param('downloaddir'))
    # never ask what to do with these MIME types
    profile.set_preference('browser.helperApps.neverAsk.openFile',
                           'text/csv,text/plain')
    profile.set_preference('browser.helperApps.neverAsk.saveToDisk',
                           'text/csv,text/plain')
    profile.set_preference('browser.helperApps.alwaysAsk.force', False)
    # suppress every download-manager popup/window
    profile.set_preference('browser.download.manager.alertOnEXEOpen', False)
    profile.set_preference('browser.download.manager.focusWhenStarting', False)
    profile.set_preference('browser.download.manager.useWindow', False)
    profile.set_preference('browser.download.manager.showAlertOnComplete', False)
    profile.set_preference('browser.download.manager.closeWhenDone', False)
def default_handler(request):
    """WSGI dispatch: take the trailing path segment as the method name and
    forward to mobile_handler; reject an empty method name with 405.

    Returns a (status, header, ret_val) triple.
    """
    status = '200 OK'
    header = [('Content-Type', 'text/html')]
    req_method = request['REQUEST_METHOD']
    path_info = request['PATH_INFO']
    # only for post
    env_param = utils.get_param(request)
    method = path_info[path_info.rindex('/')+1:]
    logging.info("req_method=%s, path_info=%s, param=%s, method=%s",
                 req_method, path_info, env_param, method)
    # method is '' or None -> reject
    # (stray py2 debug statement `print method` removed; the value is
    # already in the log line above)
    if not method:
        ret_val = [1, "Empty Method"]
        status = '405 Method Not Allowed'
        return status, header, ret_val
    return mobile_handler.mobile_handler(request, env_param, method)
def initialize(): driver = navigation.initialize() # go to saved search navigation.wait_and_click(driver, "li", "data-vs-value", "load-search-section") navigation.wait_and_click(driver, "div", "class", "wm-close-button walkme-x-button") print("Saved searches") # retrieve first saved search navigation.wait_for_element(driver, "ul", "class", "user-data-item-folder") navigation.click( driver, driver.find_element_by_xpath( "//ul[@class='user-data-item-folder']/li[1]//span[@data-ajax-submit='click:Search:LoadSearch']" )) print("First saved search") navigation.wait_for_element(driver, "td", "class", "search-step") driver.get(utils.get_param('collecturl')) return (driver)
def pot_ener_map(data, A, C, **kw):
    """Contourf potential-energy map per group, sharing a single colorbar.

    Values in *data* are pickled ([phis, psis], grid) pairs; minima are
    aligned across groups by adjust_minima before plotting.
    """
    for k in data.keys():
        data[k] = pickle.loads(data[k])
    adjust_minima(data)
    pt_dd = U.get_pt_dd(C, A.property, A.plot_type)
    logger.info(pt_dd)
    ncol, nrow = U.gen_rc(len(data.keys()), pt_dd)
    fig, axes = plt.subplots(nrows=nrow, ncols=ncol,
                             figsize=(ncol * 7, nrow * 6))
    axes = axes.flat
    if 'subplots_adjust' in pt_dd:
        fig.subplots_adjust(**pt_dd['subplots_adjust'])
    for c, gk in enumerate(data.keys()):
        ax = axes[c]
        [phis, psis], da = data[gk]
        # this is just for determining the proper levels
        logger.info('min, max of the original map: {0}, {1}'.format(
            da.min(), da.max()))
        # further process da, mainly about removing peaks
        if 'levels' in pt_dd:
            # NOTE(review): min_/max_/step are only logged here;
            # get_params re-reads 'levels' itself
            min_, max_, step = U.get_param(pt_dd['levels'], gk)
            logger.info('min, max, step from pt_dd: {0}, {1}, {2}'.format(
                min_, max_, step))
        params = get_params(gk, pt_dd)
        contour = ax.contourf(phis, psis, da, **params)
        decorate_ax(ax, gk, pt_dd, ncol, nrow, c)
    cax = fig.add_axes([0.92, 0.2, 0.02, 0.6])  # left, bottom, width, hight
    cbar = plt.colorbar(contour, cax=cax)
    if 'cbar_ylabel' in pt_dd:
        cbar.ax.set_ylabel(**pt_dd['cbar_ylabel'])
    plt.savefig(U.gen_output_filename(A, C), **pt_dd.get('savefig', {}))
def football_impl(request):
    """History page: football games in a date range, filterable by league,
    team and (original/final) handicap."""
    params = {}
    start_date, end_date = parse_date_range(request)
    league_name = get_param(request, 'league_name', '')
    team = get_param(request, 'team', '')
    params['start_date'] = start_date
    params['end_date'] = end_date
    params['league_name'] = league_name
    params['team'] = team
    original_handicap = get_param(request, 'original_handicap', '')
    original_handicap_host = get_param(request, 'original_handicap_host', '')
    original_handicap_away = get_param(request, 'original_handicap_away', '')
    final_handicap = get_param(request, 'final_handicap', '')
    final_handicap_host = get_param(request, 'final_handicap_host', '')
    final_handicap_away = get_param(request, 'final_handicap_away', '')
    # echo all handicap values back to the template
    params['original_handicap'] = original_handicap
    params['original_handicap_host'] = original_handicap_host
    params['original_handicap_away'] = original_handicap_away
    params['final_handicap'] = final_handicap
    params['final_handicap_host'] = final_handicap_host
    params['final_handicap_away'] = final_handicap_away
    games = db_utils.get_all_football_games().filter(
        datetime__gte=start_date, datetime__lte=end_date)
    if league_name != '':
        games = games.filter(leagueName__contains=league_name)
    if team != '':
        games = games.filter(team__contains=team)
    # NOTE(review): only the plain handicap values filter the queryset; the
    # host/away variants are collected and echoed but never used as filters
    if original_handicap != '':
        games = games.filter(footballhistorysummary__iHC=original_handicap)
    if final_handicap != '':
        games = games.filter(footballhistorysummary__fHC=final_handicap)
    params['games'] = games
    return render_to_response("history/football.html", params,
                              context_instance=RequestContext(request))
def pot_ener_map(data, A, C, **kw):
    """Plot a contourf potential-energy map for each group key; all panels
    share one colorbar drawn into a dedicated axes on the right.

    Each value in *data* is a pickled ([phis, psis], grid) pair; minima are
    aligned across groups by adjust_minima before plotting.
    """
    for k in data.keys():
        data[k] = pickle.loads(data[k])
    adjust_minima(data)
    pt_dd = U.get_pt_dd(C, A.property, A.plot_type)
    logger.info(pt_dd)
    ncol, nrow = U.gen_rc(len(data.keys()), pt_dd)
    fig, axes = plt.subplots(nrows=nrow, ncols=ncol,
                             figsize=(ncol*7, nrow*6))
    axes = axes.flat
    if 'subplots_adjust' in pt_dd:
        fig.subplots_adjust(**pt_dd['subplots_adjust'])
    for c, gk in enumerate(data.keys()):
        ax = axes[c]
        [phis, psis], da = data[gk]
        # this is just for determining the proper levels
        logger.info('min, max of the original map: {0}, {1}'.format(da.min(),
                                                                    da.max()))
        # further process da, mainly about removing peaks
        if 'levels' in pt_dd:
            # NOTE(review): these values are only logged; get_params
            # re-reads 'levels' on its own
            min_, max_, step = U.get_param(pt_dd['levels'], gk)
            logger.info(
                'min, max, step from pt_dd: {0}, {1}, {2}'.format(
                    min_, max_, step))
        params = get_params(gk, pt_dd)
        contour = ax.contourf(phis, psis, da, **params)
        decorate_ax(ax, gk, pt_dd, ncol, nrow, c)
    cax = fig.add_axes([0.92, 0.2, 0.02, 0.6])  # left, bottom, width, hight
    cbar = plt.colorbar(contour, cax=cax)
    if 'cbar_ylabel' in pt_dd:
        cbar.ax.set_ylabel(**pt_dd['cbar_ylabel'])
    plt.savefig(U.gen_output_filename(A, C), **pt_dd.get('savefig', {}))
def imap(data, A, C, **kw):
    """imap: interaction map.

    One pcolormesh panel per group key on an ImageGrid; maps are optionally
    denormalized per group, and every panel shares the same color range so a
    single colorbar applies to all.
    """
    logger.info('start plotting interaction map...')
    pt_dd = U.get_pt_dd(C, A.property, A.plot_type)
    fig = plt.figure(figsize=(12, 9))
    col, row = U.gen_rc(len(data.keys()), pt_dd)
    grid = ImageGrid(fig, 111, nrows_ncols=(row, col), axes_pad=0.3,
                     add_all=True, label_mode="L")
    if 'denorminators' in pt_dd:
        for gk in data:
            dn = U.get_param(pt_dd['denorminators'], gk)
            logger.info('denorminator: {0}'.format(dn))
            data[gk] = data[gk] / dn
    # used to set up the reference point and the range of color bar
    max_ = get_max(data)
    for k, gk in enumerate(data.keys()):
        ax = grid[k]
        da = data[gk]
        rda = da
        logger.info('map max: {0}; map min: {1}'.format(rda.max(), rda.min()))
        # JUST FOR REFERENCE OF SEEING WHERE THE END POINTS ARE, DEBUGGING USES
        # rda[-1][-1] = max_
        # rda[0][-1] = max_
        # sophisticated reversal to make x axis donor, y axis acceptor
        # rda = np.array([i[::-1] for i in da.transpose()[::-1]])
        # sophisticated reversal to make x axis acceptor, y axis donor
        # rda = np.array([i[::-1] for i in da[::-1]])
        params = get_params(gk, pt_dd)
        logger.info(params)
        # cmap_options: hot, gist_heat, Orange (printer friendly)
        # remove the info about the Hbonding information about first residue,
        # which ACE, this make the final map easier to understand
        rda = np.delete(np.delete(rda, 0, 0), 0, 1)
        im = ax.pcolormesh(rda, **params)
        if 'clim' in pt_dd:
            im.set_clim(**pt_dd['clim'])
        else:
            im.set_clim(0, max_)
        logger.info('shape after removal of the 0th residue: {0}'.format(
            rda.shape))
        ax.set_xlim([0, rda.shape[0]])
        ax.set_ylim([0, rda.shape[1]])
        ax.minorticks_on()
        decorate_ax(ax, pt_dd, gk)
    # im belongs to the last panel; the shared clim makes one colorbar valid
    plt.colorbar(im, shrink=.5, orientation='vertical', anchor=(1.3, 0))
    plt.savefig(U.gen_output_filename(A, C), **pt_dd.get('savefig', {}))
# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from sys import argv from requests import get from opentelemetry import propagators from utils import get_param from ot_utils import init_jaeger jaeger_host, server1_port, server2_port = get_param() tracer = init_jaeger(jaeger_host, 'fastapi_opentelemetry_client') assert len(argv) == 2 with tracer.start_as_current_span("client"): with tracer.start_as_current_span("client-server"): headers = {} propagators.inject(dict.__setitem__, headers) requested = get( f"http://localhost:{server1_port}/server_request", params={"param": argv[1]}, headers=headers, )
def grped_xy(data, A, C, **kw):
    """Scatter paired (value, error) points for two-member groups and draw an
    arrow from member 1 to member 2 for each group.

    A.properties supplies the x and y property names; markers/colors come
    from the plot-type config.
    """
    pt_dd = U.get_pt_dd(C, '_'.join(A.properties), A.plotmp_type)
    dsets = grp_datasets(data, pt_dd)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    xp, yp = A.properties
    if sorted([xp, yp]) == ['unv', 'upv']:
        # this is plot specific
        ax.plot([0, 1], [0, 1], '--')
    for dk in dsets.keys():
        dset = dsets[dk]
        xda, yda = dset[xp], dset[yp]  # da: data
        # denormx = [float(i) for i in pt_dd.get('denormx', [1] * len(xdata.keys()))]
        # denormy = [float(i) for i in pt_dd.get('denormy', [1] * len(xdata.keys()))]
        k1, k2 = xda.keys()  # ONLY deal with two members in a group (e.g. w, m)
        x1, x2 = xda[k1], xda[k2]
        y1, y2 = yda[k1], yda[k2]
        # DEPRECATED! normalization should be done in plot.py, this is left
        # here for remembering the bad design
        # if 'denormx' in pt_dd:
        #     dx1 = float(pt_dd['denormx'][k1])  # dx: denormx
        #     dx2 = float(pt_dd['denormx'][k2])
        #     x1, x2 = x1 / dx1, x2 / dx2
        # if 'denormy' in pt_dd:
        #     dy1 = float(pt_dd['denormy'][k1])  # dy: denormy
        #     dy2 = float(pt_dd['denormy'][k2])
        #     y1, y2 = y1 / dy1, y2 / dy2
        params1, params2 = {}, {}
        if 'markers' in pt_dd:
            params1['marker'] = U.get_param(pt_dd['markers'], k1)
            params2['marker'] = U.get_param(pt_dd['markers'], k2)
        if 'colors' in pt_dd:
            params1['color'] = U.get_param(pt_dd['colors'], k1)
            params2['color'] = U.get_param(pt_dd['colors'], k2)
        # each member is a (value, error) pair
        ax.errorbar(x1[0], y1[0], xerr=x1[1], yerr=y1[1], **params1)
        ax.errorbar(x2[0], y2[0], xerr=x2[1], yerr=y2[1], **params2)
        ax.annotate(
            "",  # STRANGE! the order need to be reversed
            xy=(x2[0], y2[0]), xycoords='data',
            xytext=(x1[0], y1[0]), textcoords='data',
            arrowprops=dict(
                arrowstyle="->",
                # linestyle="dashed",
                # arrowstyle="fancy",
                color="black",
                alpha=1,
                shrinkA=10, shrinkB=10,
                # connectionstyle="arc3,rad=-0.3",
                connectionstyle="arc3",
            ),
        )
    decorate_ax(ax, pt_dd)
    plt.savefig(U.gen_output_filename(A, C), **pt_dd.get('savefig', {}))
def grped_xy(data, A, C, **kw):
    """For each two-member group, scatter its (value, error) points for the
    two properties in A.properties and connect member 1 to member 2 with an
    arrow; markers and colors come from the plot-type config."""
    pt_dd = U.get_pt_dd(C, '_'.join(A.properties), A.plotmp_type)
    dsets = grp_datasets(data, pt_dd)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    xp, yp = A.properties
    if sorted([xp, yp]) == ['unv', 'upv']:
        # this is plot specific
        ax.plot([0,1], [0,1], '--')
    for dk in dsets.keys():
        dset = dsets[dk]
        xda, yda = dset[xp], dset[yp]  # da: data
        # denormx = [float(i) for i in pt_dd.get('denormx', [1] * len(xdata.keys()))]
        # denormy = [float(i) for i in pt_dd.get('denormy', [1] * len(xdata.keys()))]
        k1, k2 = xda.keys()  # ONLY deal with two members in a group (e.g. w, m)
        x1, x2 = xda[k1], xda[k2]
        y1, y2 = yda[k1], yda[k2]
        # DEPRECATED! normalization should be done in plot.py, this is left
        # here for remembering the bad design
        # if 'denormx' in pt_dd:
        #     dx1 = float(pt_dd['denormx'][k1])  # dx: denormx
        #     dx2 = float(pt_dd['denormx'][k2])
        #     x1, x2 = x1 / dx1, x2 / dx2
        # if 'denormy' in pt_dd:
        #     dy1 = float(pt_dd['denormy'][k1])  # dy: denormy
        #     dy2 = float(pt_dd['denormy'][k2])
        #     y1, y2 = y1 / dy1, y2 / dy2
        params1, params2 = {}, {}
        if 'markers' in pt_dd:
            params1['marker'] = U.get_param(pt_dd['markers'], k1)
            params2['marker'] = U.get_param(pt_dd['markers'], k2)
        if 'colors' in pt_dd:
            params1['color'] = U.get_param(pt_dd['colors'], k1)
            params2['color'] = U.get_param(pt_dd['colors'], k2)
        # each member holds a (value, error) pair
        ax.errorbar(x1[0], y1[0], xerr=x1[1], yerr=y1[1], **params1)
        ax.errorbar(x2[0], y2[0], xerr=x2[1], yerr=y2[1], **params2)
        ax.annotate("",  # STRANGE! the order need to be reversed
                    xy=(x2[0], y2[0]), xycoords='data',
                    xytext=(x1[0], y1[0]), textcoords='data',
                    arrowprops=dict(arrowstyle="->",
                                    #linestyle="dashed",
                                    # arrowstyle="fancy",
                                    color="black",
                                    alpha=1,
                                    shrinkA=10, shrinkB=10,
                                    # connectionstyle="arc3,rad=-0.3",
                                    connectionstyle="arc3",
                                    ),
                    )
    decorate_ax(ax, pt_dd)
    plt.savefig(U.gen_output_filename(A, C), **pt_dd.get('savefig', {}))
def check_polimages(self):
    """ Sort out any beams or planes, which are useless for the imaging """
    # Collect the beam and noise parameters from the main parameter file.
    # Array axes: (beam 0..39, subband, stokes) with stokes 0 = Q, 1 = U.
    rms_array = np.full((40, self.pol_end_sb + 1 - self.pol_start_sb, 2), np.nan)
    bmaj_array = np.full((40, self.pol_end_sb + 1 - self.pol_start_sb, 2), np.nan)
    bmin_array = np.full((40, self.pol_end_sb + 1 - self.pol_start_sb, 2), np.nan)
    bpa_array = np.full((40, self.pol_end_sb + 1 - self.pol_start_sb, 2), np.nan)
    for beam in range(0, 40, 1):
        try:
            # imagestats index 2 = rms; beamparams 0/1/2 = bmaj/bmin/bpa
            rms_array[beam, :] = utils.get_param(self, 'polarisation_B' + str(beam).zfill(2) + '_targetbeams_qu_imagestats')[:, 2, :]
            bmaj_array[beam, :] = utils.get_param(self, 'polarisation_B' + str(beam).zfill(2) + '_targetbeams_qu_beamparams')[:, 0, :]
            bmin_array[beam, :] = utils.get_param(self, 'polarisation_B' + str(beam).zfill(2) + '_targetbeams_qu_beamparams')[:, 1, :]
            bpa_array[beam, :] = utils.get_param(self, 'polarisation_B' + str(beam).zfill(2) + '_targetbeams_qu_beamparams')[:, 2, :]
        except KeyError:
            # beam missing from the parameter file: its row stays NaN and is
            # filtered out by the NaN checks below
            print('Synthesised beam parameters and/or noise statistics of beam ' + str(beam).zfill(2) + ' are not available. Excluding beam!')
    np.savetxt(self.polmosaicdir + '/Qrms.npy', rms_array[:, :, 0])
    np.savetxt(self.polmosaicdir + '/Qbmaj.npy', bmaj_array[:, :, 0])
    np.savetxt(self.polmosaicdir + '/Qbmin.npy', bmin_array[:, :, 0])
    np.savetxt(self.polmosaicdir + '/Qbpa.npy', bpa_array[:, :, 0])
    np.savetxt(self.polmosaicdir + '/Urms.npy', rms_array[:, :, 1])
    np.savetxt(self.polmosaicdir + '/Ubmaj.npy', bmaj_array[:, :, 1])
    np.savetxt(self.polmosaicdir + '/Ubmin.npy', bmin_array[:, :, 1])
    np.savetxt(self.polmosaicdir + '/Ubpa.npy', bpa_array[:, :, 1])
    # Create an array for the accepted beams
    accept_array = np.full((40, self.pol_end_sb + 1 - self.pol_start_sb), True)
    # Iterate through the rms and beam sizes of all cubes and filter the images.
    # NOTE(review): sb runs pol_start_sb..pol_end_sb while the subband axis
    # has length pol_end_sb + 1 - pol_start_sb; for pol_start_sb > 0 this
    # indexing looks shifted/out of range -- confirm pol_start_sb == 0.
    for b in range(40):
        # drop subbands whose Q rms is above the clip level or NaN
        for sb in range(self.pol_start_sb, self.pol_end_sb + 1):
            if rms_array[b, sb, 0] > self.pol_rmsclip or np.isnan(
                    rms_array[b, sb, 0]):
                accept_array[b, sb] = False
            else:
                continue
        # same test for U rms
        for sb in range(self.pol_start_sb, self.pol_end_sb + 1):
            if rms_array[b, sb, 1] > self.pol_rmsclip or np.isnan(
                    rms_array[b, sb, 1]):
                accept_array[b, sb] = False
            else:
                continue
        # drop subbands whose synthesised beam minor axis is too large
        for sb in range(self.pol_start_sb, self.pol_end_sb + 1):
            if bmin_array[b, sb, 0] > self.pol_bmin or bmin_array[
                    b, sb, 1] > self.pol_bmin:
                accept_array[b, sb] = False
            else:
                continue
        # ... and the major axis
        for sb in range(self.pol_start_sb, self.pol_end_sb + 1):
            if bmaj_array[b, sb, 0] > self.pol_bmaj or bmaj_array[
                    b, sb, 1] > self.pol_bmaj:
                accept_array[b, sb] = False
            else:
                continue
    np.savetxt(self.polmosaicdir + '/accept_array.npy', accept_array)
    # Generate the main array for accepting the beams
    bacc_array = np.full(40, True, dtype=bool)
    badim_array = np.zeros((40))
    # Count number of False for each beam and filter all beams out where more than x planes or more are bad
    for b in range(40):
        badim_array[b] = len(np.where(accept_array[b, :] == False)[0])
        if badim_array[b] > self.pol_badim:
            bacc_array[b] = False
            accept_array[b, :] = False
        else:
            continue
    np.savetxt(self.polmosaicdir + '/badim.npy', badim_array)
    np.savetxt(self.polmosaicdir + '/bacc.npy', bacc_array)
    # Generate the array for accepting the subbands
    sb_acc = np.full(self.pol_end_sb + 1 - self.pol_start_sb, True, dtype=bool)
    for sb in range(self.pol_start_sb, self.pol_end_sb + 1):
        # reject a subband when fewer beams pass it than pass overall
        if np.sum(accept_array[:, sb]) < np.sum(bacc_array):
            sb_acc[sb] = False
    np.savetxt(self.polmosaicdir + '/sbacc.npy', sb_acc)
    # Final mask: beam AND subband must both be accepted
    final_acc_arr = np.full((40, self.pol_end_sb + 1 - self.pol_start_sb), True)
    for b in range(40):
        for sb in range(self.pol_start_sb, self.pol_end_sb + 1):
            if bacc_array[b] and sb_acc[sb]:
                final_acc_arr[b, sb] = True
            else:
                final_acc_arr[b, sb] = False
    np.savetxt(self.polmosaicdir + '/final_accept.npy', final_acc_arr)
    return final_acc_arr
# Hyperparameter sweep: with num == -1, keep pulling still-untested parameter
# sets from the params file (up to 100 rounds); otherwise run only set #num.
for i in range(100 if (num == -1) else 1):
    # re-create the net (fresh context and weights each round)
    ctx = utils.try_gpu()
    print(ctx)
    net = get_net(ctx)
    print(net.name)
    # fetch one parameter set
    if num == -1:
        # automatically pick an untested set from the params + "got" tables
        n, p = utils.get_todo_param(params_file=params_file)
        if p is None:
            break
    else:
        # fetch the specified set from the params table (num is 1-based)
        n, p = utils.get_param(num, params_file=params_file)
        n = 0
    print(p)
    # start training
    print("=" * 60, "%i(%i) Start" % (i, n))
    # adding a None column makes s an object-dtype row, so s['num_epochs']
    # comes back as a plain int
    p['jiong'] = None
    s = p.iloc[0]
    train(net, train_data, valid_data, s['num_epochs'], s['learning_rate'],
          s['weight_decay'], ctx, s['lr_period'], s['lr_decay'])
    print("=" * 60, "%i(%i) End" % (i, n))
    sleep(3)
# save the params table
def api_statements_count(request):
    """Return the number of published statements, optionally filtered by the
    ?tag= slug, as a plain-text integer response."""
    statements = Statement.objects.published()
    tag = get_param(request, "tag")
    if tag:
        statements = statements.filter(tag__slug=tag)
    # count() issues SELECT COUNT(*) instead of fetching every row for len()
    return HttpResponse("%d" % statements.count())
def PLAY(url, name, sub_files=[]):
    """Resolve the m3u8 stream URL(s) for *url*, start playback, wait until
    the player is actually playing, then attach any saved subtitles.

    NOTE(review): mutable default sub_files=[] -- harmless here since the
    list is only read, never mutated, but worth switching to None.
    """
    link = get_html(url)
    match = re.compile('\'(.+?.m3u8)\'').findall(link)
    for u in match:
        listitem = xbmcgui.ListItem(name)
        listitem.setInfo('video', {'Title': name, 'Genre': 'Humor'})
        Player().play(unquote(u), listitem)
        while not Player().isPlaying():
            sleep(10)  # wait until video is being played
        for s in save_subs(path=addon.getAddonInfo('path'),
                           subs_href=sub_files, url=url):
            Player().setSubtitles(s)


# plugin entry point: dispatch on the 'mode' query parameter
url = get_param("url")
name = get_param("name")
mode = get_param("mode")
if mode is None or url is None or len(url) < 1:
    HOME()
elif mode == 's':
    SEASONS(url)
elif mode == 'e':
    EPISODES(url)
elif mode == 'l':
    CHOOSE(url, name)
elif mode == 'v':
    PLAY(url, name)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
payload = {"lang_id": "0", "platforma": "ANDROID1"} head = { 'Content-Type': 'application/x-www-form-urlencoded', 'Connection': 'Keep-Alive', 'Accept-Encoding': 'gzip', 'User-Agent': 'okhttp/3.9.1' } def get_xtoken_bet(payload): sorted_values = [str(payload[key]) for key in sorted(payload.keys())] to_encode = ";".join(sorted_values + [olimp_secret_key]) return {"X-TOKEN": md5(to_encode.encode()).hexdigest()} olimp_url = 'http://' + get_param('server_olimp') olimp_url_https = 'https://' + get_param('server_olimp') olimp_url_random = 'https://{}.olimp-proxy.ru' # c 13 по 18й olimp_secret_key = 'b2c59ba4-7702-4b12-bef5-0908391851d9' olimp_head = { 'Content-Type': 'application/x-www-form-urlencoded', 'Connection': 'Keep-Alive', 'Accept-Encoding': 'gzip', 'User-Agent': 'okhttp/3.9.1' } olimp_data = { "live": 1, "platforma": "ANDROID1",