def build_op(build_type, json_str):
    """Build a TBE op by resolving and calling its implementation function.

    Args:
        build_type: build mode; must equal the module-level ``op_build``
            constant, otherwise a ValueError is raised (wrapped below).
        json_str (str): JSON string describing the op (kernel info).

    Returns:
        Whatever the resolved op implementation function returns.

    Raises:
        RuntimeError: wrapping (and chaining) any error raised while
            resolving or calling the op function.
    """
    kernel_info = json.loads(json_str)
    check_kernel_info(kernel_info)

    op_name = kernel_info['op_info']['name']
    try:
        custom_flag = False
        impl_path = ""
        # A custom op ships its own implementation file: derive the module
        # name from the file name and import it from its directory.
        if kernel_info.get('impl_path') is not None:
            real_path = os.path.realpath(kernel_info['impl_path'])
            if os.path.isfile(real_path):
                impl_path, file_name = os.path.split(real_path)
                op_name, _ = os.path.splitext(file_name)
                custom_flag = True
        _initialize(impl_path)

        inputs_args = get_args(kernel_info['op_info'], 'inputs')
        outputs_args = get_args(kernel_info['op_info'], 'outputs')
        attrs_args = get_args(kernel_info['op_info'], 'attrs')
        kernel_name = kernel_info['op_info']['kernel_name']

        if custom_flag:
            op_module = __import__(op_name)
        else:
            op_module = __import__("impl." + op_name, globals(), locals(),
                                   [op_name], 0)

        # Resolve the python function implementing the op.
        if build_type == op_build:
            py_fn_name = kernel_info['op_info']['name'] if custom_flag \
                else op_name
        else:
            raise ValueError("function {} is not supported by Tbe op {}."
                             .format(build_type, op_name))

        op_func = getattr(op_module, py_fn_name, None)
        if op_func is None:
            raise ValueError("Op:{} function {} is not supported by Tbe."
                             .format(op_name, build_type))

        # bounding_box_encode takes its kernel name via a different keyword.
        if kernel_name.startswith("bounding_box_encode"):
            return op_func(*inputs_args, *outputs_args, *attrs_args,
                           kernel_name_val=kernel_name)
        return op_func(*inputs_args, *outputs_args, *attrs_args,
                       kernel_name=kernel_name)
    except Exception as e:
        # Chain the original exception so the full traceback is preserved.
        raise RuntimeError(e) from e
def main():
    """Draw a book-identification heatmap (one cell per book/inventory)."""
    args = get_args(__file__)
    labels = [
        'N/A', 'Id. Física', 'Id. Historiográfica', 'Desconocido', 'Perdido'
    ]
    colornames = ['light grey', 'medium green', 'denim blue', 'pale red']

    df = pd.DataFrame(read_table(args.table))
    df['ident'] = df.apply(categorize, axis=1)

    # One row per book, one column per inventory, cell = ident category.
    deduped = df.drop_duplicates(['bid', 'lid'], keep='first')
    data = deduped.pivot(index='bid', columns='lid', values='ident').fillna(0)

    palette = sns.xkcd_palette(colornames)
    fig, axes = plt.subplots()
    sns.heatmap(data, ax=axes, square=True, linewidth=0.5,
                cmap=ListedColormap(palette), cbar=False)
    set_axis(axes, data, as_letters(set(df.year.values)), 'Libros')
    legend(fig, axes, labels, palette)
    plotting(plt, args)
def main():
    """Run the switching calculation for every cann group on a dask cluster."""
    args = get_args()
    client = Client('127.0.0.1:8786')
    total_cores = sum(client.ncores().values())

    pd.set_option('display.large_repr', 'truncate')
    pd.set_option('display.max_columns', 0)  # noqa
    pd.set_option('display.max_rows', 1000)  # noqa

    cann_group_df = make_cann_group_df(num_products=100)
    df = read_df(args, cann_group_df['productKey'])

    logger.info('Setting index')
    df = df.set_index('customerKey', drop=True)
    logger.info('Repartitioning')
    # One partition per worker core in the cluster.
    df = df.repartition(npartitions=total_cores)
    logger.info('Mapping Cann Group')
    df['cannGroupKey'] = df['productKey'].map(cann_group_df['cannGroupKey'])
    logger.info('Persisting')
    df = client.persist(df)

    logger.info('Cann Groups')
    for key in cann_group_df['cannGroupKey'].unique().tolist():
        print('Filtering Cann Group %s' % key)
        subset = df[df['cannGroupKey'] == key]
        print('This df: %s' % (len(subset), ))
        with Timer('%s' % (key, )):
            calculate_switching(subset)
def main():
    """Highlight one tracked book (bid 25) in a position/inventory heatmap."""
    args = get_args(__file__)
    df = pd.DataFrame(read_table(args.table))

    def track(row):
        # Mark it as id, which will go in red
        if row['bid'] == 25:
            return 1000
        return row['bid']

    df['track'] = df.apply(track, axis=1)
    data = df.pivot(index='pos', columns='lid', values='track')

    palette = sns.xkcd_palette(['light blue', 'bright red'])
    fig, axes = plt.subplots()
    sns.heatmap(data, ax=axes, square=True, linewidth=0.5,
                cmap=ListedColormap(palette), cbar=False)
    set_axis(axes, data, as_letters(set(df.year.values)), 'Posición')
    plotting(plt, args)
def main():
    """Entry point: either print the available list or draw the plot."""
    parser = get_parser()
    parser.add_argument('--list', action='store_true')
    parser.add_argument('--annotated', action='store_true')
    parser.add_argument('--color-by')
    args = get_args(__file__, parser)
    # Guard clause: --list short-circuits plotting entirely.
    if args.list:
        print_list()
        return
    plot(args)
def get_cxx_base_generic(operator):
    """Emit the C++ base-API generic wrapper for *operator*."""
    # Void operators (return param '_') must not emit a `return` keyword.
    returns = 'return' if operator.params[0] != '_' else ''
    prefix = common.get_args(len(operator.params) - 1)
    if prefix != '':
        prefix += ', '
    # Non-closed operators need both source (F) and destination (T) types.
    tail = 'T()' if operator.closed else 'F(), T()'
    args = prefix + tail
    sig = operator.get_generic_signature('cxx_base')[:-1]
    return \
    '''#if NSIMD_CXX > 0

namespace nsimd {{

{sig} {{
  {returns} {name}({args}, NSIMD_SIMD());
}}

}} // namespace nsimd

#endif'''.format(name=operator.name, args=args, returns=returns, sig=sig)
def get_c_base_generic(operator):
    """Emit the C base-API generic macro pair for *operator*."""
    varargs = common.get_args(len(operator.params) - 1)
    sig = operator.get_generic_signature('c_base')
    # Closed operators keep one type; open ones convert between two, so the
    # macro has to paste both from_type and to_type into the symbol name.
    if operator.closed:
        template = \
        '''{sig} NSIMD_PP_CAT_4(nsimd_{name}_, NSIMD_SIMD, _, type)({vas})

{sig_e} NSIMD_PP_CAT_4(nsimd_{name}_, simd_ext, _, type)({vas})'''
    else:
        template = \
        '''{sig} NSIMD_PP_CAT_6(nsimd_{name}_, NSIMD_SIMD, _, \\
                                to_type, _, from_type)({vas})

{sig_e} NSIMD_PP_CAT_6(nsimd_{name}_, simd_ext, _, \\
                       to_type, _, from_type)({vas})'''
    return template.format(sig=sig[0], sig_e=sig[1], name=operator.name,
                           vas=varargs)
def main(wf):
    """Open the dashboard URL, optionally jumping straight to a search.

    Args:
        wf: workflow object exposing command-line ``args`` and persisted
            ``settings`` (must contain "dashboard_url").
    """
    args = get_args(wf.args)
    query = args.query.strip()
    raw_dashboard_url = wf.settings.get("dashboard_url")
    if not raw_dashboard_url:
        # Fixed typo in the reported variable name (was "dashboad url").
        _report_missing_var(wf, "dashboard url")
        return
    dashboard_url = raw_dashboard_url.strip()
    if not query:
        # No query: just open the dashboard itself.
        _open_url(dashboard_url)
        return
    # Avoid a double slash when the stored URL already ends with one.
    if dashboard_url.endswith("/"):
        base_search = "#!/search?q="
    else:
        base_search = "/#!/search?q="
    _open_url(dashboard_url + base_search + query)
def main():
    """Heatmap of book ids by position and inventory (navy gradient)."""
    args = get_args(__file__)
    df = pd.DataFrame(read_table(args.table))
    data = df.pivot(index='pos', columns='lid', values='bid')

    # One palette shade per heatmap row.
    palette = sns.light_palette('navy', len(data))
    fig, axes = plt.subplots()
    sns.heatmap(data, ax=axes, square=True, linewidth=0.5,
                cmap=ListedColormap(palette), cbar=False)
    set_axis(axes, data, as_letters(set(df.year.values)), 'Posición')
    plotting(plt, args)
def main():
    """Bar subplots of book height per inventory and position."""
    # TODO fix axis
    args = get_args(__file__)
    table = pd.DataFrame(read_table(args.table))
    df = table.pivot(index='pos', columns='lid').fillna(float('NaN'))

    axes = df.height.plot(kind='bar', subplots=True, grid=True,
                          title='Altura de libro por inventario y posición')
    for axis in axes:
        axis.set_title('')
        axis.set_ylabel('')
        axis.legend(loc='upper right', bbox_to_anchor=(1.1, 1))

    # Thin out the x tick labels: keep every fifth position only.
    visible = set(range(1, len(df), 5))
    for idx, label in enumerate(axes[-1].xaxis.get_ticklabels()):
        label.set_visible(idx + 1 in visible)

    plotting(plt, args)
def main():
    """Heatmap of book language categories per inventory."""
    args = get_args(__file__)
    names = ['NA', 'LAT', 'ROM', 'FRAN']
    labels = ['N/A', 'Latín', 'Romance', 'Francés']
    colornames = ['light grey', 'pale red', 'medium green', 'denim blue']

    df = pd.DataFrame(read_table(args.table))
    data = categorical_by(df, 'lang', names)
    palette = sns.xkcd_palette(colornames)

    fig, axes = plt.subplots()
    sns.heatmap(data, ax=axes, square=True, linewidth=0.5,
                cmap=ListedColormap(palette), cbar=False)
    set_axis(axes, data, as_letters(set(df.year.values)), 'Libros')
    legend(fig, axes, labels, palette)
    plotting(plt, args)
def main():
    """Heatmap of book topic categories per inventory.

    Reads the table given on the command line, buckets each book into one
    of the known topic categories and renders a fixed-palette heatmap.
    """
    args = get_args(__file__)
    names = ['NA', 'REL', 'CRONIC', 'ANTI']
    labels = ['N/A', 'Religioso', 'Crónicas y Leyes', 'Historia Antigua']
    colornames = ['light grey', 'pale red', 'medium green', 'denim blue']

    df = pd.DataFrame(read_table(args.table))
    data = categorical_by(df, 'topic', names)
    colors = sns.xkcd_palette(colornames)

    f, ax = plt.subplots()
    sns.heatmap(data, ax=ax, square=True, linewidth=0.5,
                cmap=ListedColormap(colors), cbar=False)
    # Consistency fix: sibling scripts pass the deduplicated year set to
    # as_letters(); this one passed the raw per-row values.
    set_axis(ax, data, as_letters(set(df.year.values)), ylabel='Posición')
    legend(f, ax, labels, colors)
    plotting(plt, args)
def main():
    """Heatmap of the year each book appears in each inventory."""
    args = get_args(__file__)
    df = pd.DataFrame(read_table(args.table))

    unique_books = df.drop_duplicates(['bid', 'lid'], keep='first')
    data = unique_books.pivot(index='bid', columns='lid',
                              values='year').fillna(False)

    # One red shade per heatmap row.
    palette = sns.light_palette('red', len(data))
    fig, axes = plt.subplots()
    sns.heatmap(data, ax=axes, square=True, linewidth=0.5,
                cmap=ListedColormap(palette), cbar=False)
    set_axis(axes, data, as_letters(set(df.year.values)), 'Libros')
    plotting(plt, args)
def main():
    """Heatmap of book heights by position and inventory (green gradient)."""
    args = get_args(__file__)
    df = pd.DataFrame(read_table(args.table))

    # One palette step per unit of the tallest book's height.
    sizes = len(range(int(df['height'].max())))
    data = df.pivot(index='pos', columns='lid', values='height').fillna(0)

    palette = sns.light_palette('green', sizes)
    fig, axes = plt.subplots()
    sns.heatmap(data, ax=axes, square=True, linewidth=0.5,
                cmap=ListedColormap(palette), cbar=False)
    set_axis(axes, data, as_letters(set(df.year.values)), 'Tamaño')
    plotting(plt, args)
def get_generic_signature(self, lang):
    """Return the generic (type-agnostic) signature(s) of this operator.

    Args:
        lang: target API flavour. One of:
            'c_base'  -> list of two '#define ...' strings (implicit and
                         explicit-simd_ext macro);
            'cxx_base'-> a single C++ template declaration string;
            'cxx_adv' -> dict of template declaration strings keyed by
                         '1', 'N' and optionally 'op1', 'opN', 'dispatch'.

    Raises:
        Exception: if ``lang`` is none of the supported values.
    """
    if lang == 'c_base':
        vas = common.get_args(len(self.params) - 1)
        args = vas + (', ' if vas != '' else '')
        # Open (non-closed) operators convert between two types, so both
        # source and destination types become macro parameters.
        args += 'from_type, to_type' if not self.closed else 'type'
        return ['#define v{name}({args})'.format(name=self.name, args=args),
                '#define v{name}_e({args}, simd_ext)'. \
                format(name=self.name, args=args)]
    elif lang == 'cxx_base':
        # Map the abstract return parameter onto a simd_traits vector type.
        return_typ = common.get_one_type_generic(self.params[0], 'T')
        if return_typ.startswith('vT'):
            return_typ = \
                'typename simd_traits<T, NSIMD_SIMD>::simd_vector{}'. \
                format(return_typ[2:])
        elif return_typ == 'vlT':
            return_typ = \
                'typename simd_traits<T, NSIMD_SIMD>::simd_vectorl'
        args_list = common.enum(self.params[1:])
        # Template parameter list: one typename per argument, plus the
        # value type(s) (F and T for open operators, T only when closed).
        temp = ', '.join(['typename A{}'.format(a[0]) for a in args_list])
        temp += ', ' if temp != '' else ''
        if not self.closed:
            tmpl_args = temp + 'typename F, typename T'
        else:
            tmpl_args = temp + 'typename T'
        # Function parameter list mirrors the template parameters.
        temp = ', '.join(['A{i} a{i}'.format(i=a[0]) for a in args_list])
        temp += ', ' if temp != '' else ''
        if not self.closed:
            func_args = temp + 'F, T'
        else:
            func_args = temp + 'T'
        return \
            'template <{tmpl_args}> {return_typ} {name}({func_args});'. \
            format(return_typ=return_typ, tmpl_args=tmpl_args,
                   func_args=func_args, name=self.name)
    elif lang == 'cxx_adv':
        def get_pack(param):
            # 'v...' params map to pack variants, logicals to packl.
            return 'pack{}'.format(param[1:]) if param[0] == 'v' \
                   else 'packl'
        args_list = common.enum(self.params[1:])
        inter = [i for i in ['v', 'l', 'vx2', 'vx3', 'vx4'] \
                 if i in self.params[1:]]
        # Do we need tag dispatching on pack<>? e.g. len, set1 and load*
        need_tmpl_pack = get_pack(self.params[0]) if inter == [] else None
        # Compute template arguments
        tmpl_args = []
        if not self.closed:
            tmpl_args += ['typename ToPackType']
        tmpl_args1 = tmpl_args + ['typename T', 'typename SimdExt']
        tmpl_argsN = tmpl_args + [
            'typename T', 'int N', 'typename SimdExt'
        ]
        # Non-pack arguments (ints, pointers, ...) get their own typename.
        other_tmpl_args = ['typename A{}'.format(i[0]) for i in args_list \
                           if i[1] not in ['v', 'l']]
        tmpl_args1 += other_tmpl_args
        tmpl_argsN += other_tmpl_args
        tmpl_args1 = ', '.join(tmpl_args1)
        tmpl_argsN = ', '.join(tmpl_argsN)
        # Compute function arguments
        def arg_type(arg, N):
            # Pack-like args are passed as const refs to pack<T, N, ...>;
            # everything else is passed by its deduced template type.
            if arg[1] in ['v', 'l']:
                pack_typ = 'pack' if arg[1] == 'v' else 'packl'
                return '{}<T, {}, SimdExt> const&'.format(pack_typ, N)
            else:
                return 'A{}'.format(arg[0])
        args1 = [
            '{} a{}'.format(arg_type(i, '1'), i[0]) for i in args_list
        ]
        argsN = [
            '{} a{}'.format(arg_type(i, 'N'), i[0]) for i in args_list
        ]
        # Arguments without tag dispatching on pack
        other_argsN = ', '.join(argsN)
        if not self.closed:
            args1 = ['ToPackType'] + args1
            argsN = ['ToPackType'] + argsN
        if need_tmpl_pack != None:
            # Prepend a dummy pack argument used purely for dispatching.
            args1 = ['{}<T, 1, SimdExt> const&'.format(need_tmpl_pack)] + \
                    args1
            argsN = ['{}<T, N, SimdExt> const&'.format(need_tmpl_pack)] + \
                    argsN
        args1 = ', '.join(args1)
        argsN = ', '.join(argsN)
        # Compute return type
        ret1 = 'ToPackType' if not self.closed \
               else common.get_one_type_generic_adv_cxx(self.params[0],
                                                        'T', '1')
        retN = 'ToPackType' if not self.closed \
               else common.get_one_type_generic_adv_cxx(self.params[0],
                                                        'T', 'N')
        # '1' = unroll-1 declaration, 'N' = unrolled-by-N declaration.
        ret = { \
            '1': 'template <{tmpl_args1}> {ret1} {cxx_name}({args1});'. \
                 format(tmpl_args1=tmpl_args1, ret1=ret1, args1=args1,
                        cxx_name=self.name),
            'N': 'template <{tmpl_argsN}> {retN} {cxx_name}({argsN});'. \
                 format(tmpl_argsN=tmpl_argsN, retN=retN, argsN=argsN,
                        cxx_name=self.name)
        }
        if self.cxx_operator:
            # Same declarations again under the C++ operator spelling.
            ret.update({ \
                'op1': 'template <{tmpl_args1}> {ret1} {cxx_name}({args1});'. \
                       format(tmpl_args1=tmpl_args1, ret1=ret1, args1=args1,
                              cxx_name=self.cxx_operator),
                'opN': 'template <{tmpl_argsN}> {retN} {cxx_name}({argsN});'. \
                       format(tmpl_argsN=tmpl_argsN, retN=retN, argsN=argsN,
                              cxx_name=self.cxx_operator)
            })
        if not self.closed:
            ret['dispatch'] = \
                'template <{tmpl_argsN}> {retN} {cxx_name}({other_argsN});'. \
                format(tmpl_argsN=tmpl_argsN, other_argsN=other_argsN,
                       retN=retN, cxx_name=self.name)
        elif need_tmpl_pack != None:
            other_tmpl_args = ', '.join(['typename SimdVector'] + \
                                        other_tmpl_args)
            ret['dispatch'] = \
                '''template <{other_tmpl_args}>
SimdVector {cxx_name}({other_argsN});'''. \
                format(other_tmpl_args=other_tmpl_args,
                       other_argsN=other_argsN, cxx_name=self.name)
        return ret
    else:
        raise Exception('Lang must be one of c_base, cxx_base, cxx_adv')
def get_impl(operator, totyp, typ):
    """Return the SYCL scalar C++ body implementing *operator* for *typ*.

    Args:
        operator: operator description object (``name``, ``params``,
            ``src`` flag).
        totyp: destination type string (used by reinterpret/cvt/to_mask).
        typ: source type string, e.g. 'f16', 'f32', 'i32'.

    Returns:
        str: a C++ 'return ...;' statement.
        # NOTE(review): operators not matched by any branch below fall
        # through with no explicit return — confirm callers handle that.
    """
    global fmtspec
    # Shared .format() substitutions used by every snippet below.
    fmtspec = {
        'in0': common.in0,
        'in1': common.in1,
        'in2': common.in2,
        'typ': typ,
        'totyp': totyp,
        'typnbits': typ[1:]
    }
    # src operators
    if operator.src:
        # Map nsimd math op names (with accuracy suffixes _uXX) onto the
        # single SYCL builtin implementing each of them.
        oneapi_ops = {
            'sin_u35': 'sin', 'cos_u35': 'cos', 'tan_u35': 'tan',
            'asin_u35': 'asin', 'acos_u35': 'acos', 'atan_u35': 'atan',
            'atan2_u35': 'atan2', 'log_u35': 'log', 'cbrt_u35': 'cbrt',
            'sin_u10': 'sin', 'cos_u10': 'cos', 'tan_u10': 'tan',
            'asin_u10': 'asin', 'acos_u10': 'acos', 'atan_u10': 'atan',
            'atan2_u10': 'atan2', 'log_u10': 'log', 'cbrt_u10': 'cbrt',
            'exp_u10': 'exp', 'pow_u10': 'pow', 'sinh_u10': 'sinh',
            'cosh_u10': 'cosh', 'tanh_u10': 'tanh', 'sinh_u35': 'sinh',
            'cosh_u35': 'cosh', 'tanh_u35': 'tanh',
            'fastsin_u3500': 'sin', 'fastcos_u3500': 'cos',
            'fastpow_u3500': 'pow', 'asinh_u10': 'asinh',
            'acosh_u10': 'acosh', 'atanh_u10': 'atanh',
            'exp2_u10': 'exp2', 'exp2_u35': 'exp2', 'exp10_u10': 'exp10',
            'exp10_u35': 'exp10', 'expm1_u10': 'expm1',
            'log10_u10': 'log10', 'log2_u10': 'log2', 'log2_u35': 'log2',
            'log1p_u10': 'log1p', 'sinpi_u05': 'sinpi',
            'cospi_u05': 'cospi', 'hypot_u05': 'hypot',
            'hypot_u35': 'hypot', 'remainder': 'remainder',
            'fmod': 'fmod', 'lgamma_u10': 'lgamma',
            'tgamma_u10': 'tgamma', 'erf_u10': 'erf', 'erfc_u15': 'erfc'
        }
        return 'return cl::sycl::{}({});'.format(
            oneapi_ops[operator.name],
            common.get_args(len(operator.params[1:])))
    # bool first, no special treatment for f16's
    bool_operators = ['andl', 'orl', 'xorl', 'andnotl', 'notl']
    if operator.name in bool_operators:
        # notl is the only unary logical operator.
        if operator.name == 'notl':
            return 'return nsimd_scalar_{op}({in0});'.\
                   format(op=operator.name, **fmtspec)
        else:
            return 'return nsimd_scalar_{op}({in0}, {in1});'.\
                   format(op=operator.name, **fmtspec)
    # infix operators no special treatment for f16's
    infix_operators = ['orb', 'andb', 'andnotb', 'notb', 'xorb']
    if operator.name in infix_operators:
        # notb is the only unary bitwise operator.
        if operator.name == 'notb':
            return 'return nsimd_scalar_{op}_{typ}({in0});'.\
                   format(op=operator.name, **fmtspec)
        else:
            return 'return nsimd_scalar_{op}_{typ}({in0}, {in1});'.\
                   format(op=operator.name, **fmtspec)
    # reinterpret
    if operator.name == 'reinterpret':
        return reinterpret(totyp, typ)
    # cvt
    if operator.name == 'cvt':
        if 'f16' == totyp:
            # conversion op: takes in a 32 bit float and converts it to
            # 16 bits
            return 'return sycl::half(static_cast<f32>({in0}));'. \
                   format(**fmtspec)
        else:
            return 'return nsimd_scalar_cvt_{totyp}_{typ}({in0});'. \
                   format(**fmtspec)
    # to_mask
    if operator.name == 'to_mask':
        return 'return nsimd_scalar_to_mask_{totyp}({in0});'.format(**fmtspec)
    # to_logical
    if operator.name == 'to_logical':
        return 'return nsimd_scalar_to_logical_{typ}({in0});'.format(**fmtspec)
    # for all other operators, f16 has a special treatment
    if typ == 'f16':
        return get_impl_f16(operator, totyp, typ)
    # infix operators - rec - f32, f64
    infix_op_rec_ftypes = ['rec', 'rec8', 'rec11']
    if typ in common.ftypes_no_f16 and operator.name in infix_op_rec_ftypes:
        return '''// sycl::recip available in native form only
                  return 1.0{f} / {in0};'''. \
               format(f='f' if typ == 'f32' else '', **fmtspec)
    # infix operators - cmp - f32, f64
    infix_op_cmp_f32_f64 = {
        'lt': 'return {cast_to_int}sycl::isless({in0}, {in1});',
        'gt': 'return {cast_to_int}sycl::isgreater({in0}, {in1});',
        'le': 'return {cast_to_int}sycl::islessequal({in0}, {in1});',
        'ge': 'return {cast_to_int}sycl::isgreaterequal({in0}, {in1});',
        'ne': 'return {cast_to_int}sycl::isnotequal({in0}, {in1});',
        'eq': 'return {cast_to_int}sycl::isequal({in0}, {in1});'
    }
    if typ in common.ftypes_no_f16 and operator.name in infix_op_cmp_f32_f64:
        # f64 comparisons return a long-like value; cast down to int.
        return infix_op_cmp_f32_f64[operator.name]. \
               format(cast_to_int='(int)' if typ == 'f64' else '', **fmtspec)
    # infix operators - cmp - integer types
    infix_op_cmp_iutypes = ['lt', 'gt', 'le', 'ge', 'ne', 'eq']
    if operator.name in infix_op_cmp_iutypes:
        return 'return nsimd_scalar_{op}_{typ}({in0},{in1});'.\
               format(op=operator.name, **fmtspec)
    # infix operators f32, f64 + integers
    # ref: see Data Parallel C++ book, pages 480, 481, 482
    # TODO: do the functions below call instrinsics/built-in
    # functions on the device?
    # 'add': 'return std::plus<{typ}>()({in0}, {in1});',
    # 'sub': 'return std::minus<{typ}>()({in0}, {in1});',
    # 'mul': 'return std::multiplies<{typ}>()({in0}, {in1});',
    # 'div': 'return std::divides<{typ}>()({in0}, {in1});',
    infix_op_t = ['add', 'sub', 'mul', 'div']
    if operator.name in infix_op_t:
        return 'return nsimd_scalar_{op}_{typ}({in0}, {in1});'. \
               format(op=operator.name, **fmtspec)
    # neg
    # ref: see Data Parallel C++ book, pages 480, 481, 482
    # TODO: does the function below call an instrinsic/built-in
    # function on the device?
    # 'neg': 'return std::negate<{typ}>()({in0});'
    if operator.name == 'neg':
        return 'return nsimd_scalar_{op}_{typ}({in0});'. \
               format(op=operator.name, **fmtspec)
    # shifts
    shifts_op_ui_t = ['shl', 'shr', 'shra']
    if operator.name in shifts_op_ui_t and typ in common.iutypes:
        return 'return nsimd_scalar_{op}_{typ}({in0}, {in1});'. \
               format(op=operator.name, **fmtspec)
    # adds: saturated add for integers, plain add for floats.
    if operator.name == 'adds':
        if typ in common.ftypes:
            return 'return nsimd_scalar_add_{typ}({in0}, {in1});'. \
                   format(**fmtspec)
        else:
            return 'return sycl::add_sat({in0}, {in1});'.format(**fmtspec)
    # subs: saturated sub for integers, plain sub for floats.
    if operator.name == 'subs':
        if typ in common.ftypes:
            return 'return nsimd_scalar_sub_{typ}({in0}, {in1});'. \
                   format(**fmtspec)
        else:
            return 'return sycl::sub_sat({in0}, {in1});'.format(**fmtspec)
    # fma's: sign of the product and of the addend encode the 4 variants.
    if operator.name in ['fma', 'fms', 'fnma', 'fnms']:
        if typ in common.ftypes:
            neg = '-' if operator.name in ['fnma', 'fnms'] else ''
            op = '-' if operator.name in ['fnms', 'fms'] else ''
            return 'return sycl::fma({neg}{in0}, {in1}, {op}{in2});'. \
                   format(op=op, neg=neg, **fmtspec)
        else:
            return 'return nsimd_scalar_{op}_{typ}({in0}, {in1}, {in2});'. \
                   format(op=operator.name, **fmtspec)
    # other operators
    # round_to_even, ceil, floor, trunc, min, max, abs, sqrt
    # round_to_even
    if operator.name == 'round_to_even':
        if typ in common.ftypes_no_f16:
            return 'return sycl::rint({in0});'.format(**fmtspec)
        else:
            # Integers are already "rounded".
            return 'return {in0};'.format(**fmtspec)
    # other rounding operators
    other_rounding_ops = ['ceil', 'floor', 'trunc']
    if operator.name in other_rounding_ops:
        if typ in common.iutypes:
            return 'return nsimd_scalar_{op}_{typ}({in0});'. \
                   format(op=operator.name, **fmtspec)
        else:
            return 'return sycl::{op}({in0});'. \
                   format(op=operator.name, **fmtspec)
    # min/max
    if operator.name in ['min', 'max']:
        if typ in common.iutypes:
            return 'return sycl::{op}({in0}, {in1});'.\
                   format(op=operator.name, **fmtspec)
        else:
            # Floats use fmin/fmax (NaN-aware SYCL builtins).
            op = 'sycl::fmin' if operator.name == 'min' else 'sycl::fmax'
            return 'return {op}({in0}, {in1});'.format(op=op, **fmtspec)
    # abs
    if operator.name == 'abs':
        if typ in common.itypes:
            # sycl::abs returns the unsigned type; cast back.
            return 'return ({typ})sycl::abs({in0});'.format(**fmtspec)
        elif typ in common.utypes:
            return 'return nsimd_scalar_abs_{typ}({in0});'.format(**fmtspec)
        else:
            return 'return sycl::fabs({in0});'.format(**fmtspec)
    # sqrt
    if operator.name == 'sqrt' and typ in common.ftypes:
        return 'return sycl::sqrt({in0});'.format(**fmtspec)
    # rsqrt
    if operator.name in ['rsqrt8', 'rsqrt11', 'rsqrt'
                         ] and typ in common.ftypes:
        return 'return sycl::rsqrt({in0});'.format(**fmtspec)
def main():
    """Scatter plot of book order in one inventory vs. another.

    Plots each book's position in inventory --first against its position
    in --second, colored either by presence in both inventories or by the
    metadata variable named in --color-by; optionally annotates points.
    """
    parser = get_parser()
    parser.add_argument('--first', default=3, type=int)
    parser.add_argument('--second', default=4, type=int)
    parser.add_argument('--annotated', action='store_true')
    parser.add_argument('--iterations', default=10, type=int)
    parser.add_argument('--color-by')
    args = get_args(__file__, parser)

    columns = [args.first, args.second]
    df = pd.DataFrame(read_table(args.table))
    # Fix: DataFrame.reindex_axis was removed from pandas; reindex with
    # the columns keyword is the supported equivalent on all versions.
    data = df[(df.lid == args.first) | (df.lid == args.second)]\
        .pivot(index='bid', columns='lid', values='pos')\
        .sort_values(by=args.first)\
        .fillna(0)\
        .reindex(columns=columns)  # assure column order

    # Reindex by position
    meta = Metadata(index='pos',
                    dfs=[df[df.lid == args.first], df[df.lid == args.second]])
    palette_name = None
    title = 'Orden/Orden inventarios {} y {}'\
        .format(to_letter(args.first), to_letter(args.second))

    if not args.color_by:
        # Color based on wether theyre in both inventaries or missing
        data['color'] = data.apply(
            lambda row: any(not row[c] for c in columns), 1)
    else:
        variable_name = variable_names.get(args.color_by, args.color_by)
        title += ' variable "{}"'.format(variable_name)
        data['color'] = data.apply(
            lambda row: meta.get_field(args.color_by, *
                                       [row[c] for c in columns]), 1)

    # Group numerical values in 5 bins/categories
    color_sorter = None
    if args.color_by in ['area', 'height']:
        palette_name = 'YlOrRd'  # yellow to red
        bins = 10 if args.color_by == 'height' else 5
        data['color'] = pd.cut(data['color'], bins, precision=0)

        def color_sorter(e):
            # Sort interval categories by their left bound.
            return float(str(e).strip('(').strip(']').split(', ', 1)[0])

    # Assure repeteable colors by setting category-color map
    # before lmplot does it randomly on each run and confuse us
    values = sorted(data['color'].unique(), key=color_sorter)
    colors = sns.color_palette(palette=palette_name, n_colors=len(values))
    palette = dict(zip(values, colors))

    # Use str as column names, otherwise lmplot goes wild
    columns = list(map(str, columns))
    data.columns = columns + ['color']
    p = sns.lmplot(*columns, data=data, hue='color',
                   palette=palette, legend=False, legend_out=True,
                   fit_reg=False, size=7, aspect=1.3)

    # Set top title and space for it
    plt.suptitle(title)
    p.fig.subplots_adjust(top=0.92)
    p.set(ylim=(0, None), xlim=(0, None))

    # Set legend outside graph at center right
    if args.color_by:
        p.fig.subplots_adjust(right=0.85)
        variable_name = variable_names.get(args.color_by, args.color_by)
        plt.legend(bbox_to_anchor=(1.18, 0.7), borderaxespad=0.,
                   title=variable_name)

    if args.annotated:
        texts = [
            p.ax.text(
                first, second,
                meta.get_field('short', first, second),
                fontsize=8,
            ) for first, second, color in data.values
        ]
        adjust_text(texts, force_points=1.5, lim=args.iterations,
                    arrowprops=dict(arrowstyle="-", color='r', alpha=0.8))

    plotting(plt, args)
import numpy as np
import time
from common import get_args, experiment_setup_test
from copy import deepcopy
import pickle
import torch
import tensorflow as tf
from gym.envs.registration import register
from collections import namedtuple

# Minimal stand-in for a replay buffer: the test-only path reads .counter.
BufferMock = namedtuple('Buffer', ['counter'])

if __name__ == '__main__':
    # Getting arguments from command line + defaults
    # Set up learning environment including, gym env, ddpg agent,
    # hgg/normal learner, tester
    args = get_args(do_just_test=True)
    env, agent, tester = experiment_setup_test(args)
    # No summary writers are needed when only testing.
    args.logger.summary_init(None, None)
    args.buffer = BufferMock(counter=0)
    '''#activate test setup if exists
    if hasattr(tester.env.env.env,'test_setup'):
        for e in tester.env_List:
            e.env.env.test_setup()'''
    # Progress info
    args.logger.add_item('N')
    args.logger.add_item('Epoch')
    args.logger.add_item('Cycle')
    args.logger.add_item('TimeCost(sec)')
max_rounds = 1500 C = 10 / 500 NC = 500 E = 1 B = 20 is_iid = False server_lr = 0.0316 client_lr = 0.0316 server_opt = "Yogi" client_opt = "SGD" client_opt_strategy = "reinit" # image_norm = "tflike" # TODO a paraméterek helytelen nevére nem adott hibát for l2 in [1e-3, 1e-2, 1e-1, 1e-1, 1e1]: s_opt_args = common.get_args(server_opt) s_opt_args["weight_decay"] = l2 config = TorchFederatedLearnerCIFAR100Config( BREAK_ROUND=300, CLIENT_LEARNING_RATE=client_lr, CLIENT_OPT=client_opt, CLIENT_OPT_ARGS=common.get_args(client_opt), CLIENT_OPT_L2=l2, CLIENT_OPT_STRATEGY=client_opt_strategy, SERVER_OPT=server_opt, SERVER_OPT_ARGS=s_opt_args, SERVER_LEARNING_RATE=server_lr, IS_IID_DATA=is_iid, BATCH_SIZE=B, CLIENT_FRACTION=C, N_CLIENTS=NC,
server_lr = 0.0316 client_lr = 0.0316 server_opt = "Yogi" client_opt = "SGD" client_opt_strategy = "reinit" # image_norm = "tflike" # TODO a paraméterek helytelen nevére nem adott hibát config = TorchFederatedLearnerCIFAR100Config( BREAK_ROUND=1500, CLIENT_LEARNING_RATE=client_lr, CLIENT_OPT=client_opt, # CLIENT_OPT_ARGS=common.get_args(client_opt), CLIENT_OPT_L2=1e-4, CLIENT_OPT_STRATEGY=client_opt_strategy, SERVER_OPT=server_opt, SERVER_OPT_ARGS=common.get_args(server_opt), SERVER_LEARNING_RATE=server_lr, IS_IID_DATA=is_iid, BATCH_SIZE=B, CLIENT_FRACTION=C, N_CLIENTS=NC, N_EPOCH_PER_CLIENT=E, MAX_ROUNDS=max_rounds, IMAGE_NORM="recordwisefull", NORM="group", INIT="tffed", AUG="basicf") config_technical = TorchFederatedLearnerTechnicalConfig(BREAK_ROUND=300, EVAL_ROUND=100) name = f"{config.SERVER_OPT}: {config.SERVER_LEARNING_RATE} - {config.CLIENT_OPT_STRATEGY} - {config.CLIENT_OPT}: {config.CLIENT_LEARNING_RATE}" experiment = Experiment(workspace="federated-learning",
vcf_dir=vcf_dir, dir_launch=dir_launch) print('configuring..') mutation_counter.configure(line, dir_launch) print('processing lines.') for line_counter, line in enumerate(infile): mutation_counter.process_line(line, bed_filter=bed_filter) print('writing.') mutation_counter.write_output() if __name__ == '__main__': chromosomes, reference, short, vcf_file, dir_launch, vcf_dir, bed_filter = get_args( ) bed_tag = ['', '_lb_'][int(len(bed_filter) > 0)] outfile_dir = dir_launch + '/' + reference + bed_tag + '_finescale_mut_spectra_vcf.' + vcf_dir + '/' try: os.mkdir(outfile_dir) except OSError as e: if e.errno != errno.EEXIST: raise if len(bed_filter): chrom_dict = read_bed(chromosomes, bed_filter) else: chrom_dict = {x: [] for x in chromosomes}
def main():
    """Linear-regression toy example: fit, build the FIM, plot samples.

    Generates noisy data from y = alpha + beta*t, recovers the parameters
    by least squares, computes the Fisher information matrix and its
    inverse (the parameter covariance), then plots parameter samples and
    confidence ellipses. Output goes to <args.output>/simple_example or to
    the screen when --plot is given.
    """
    args = get_args()
    # makedirs with exist_ok avoids the exists()/mkdir race.
    os.makedirs(os.path.join(args.output, "simple_example"), exist_ok=True)

    # [alpha, beta, sigma]
    true_parameters = [2, 3, 0.25]

    def forward_model(t):
        # y = alpha + beta * t (was a lambda assigned to a name; PEP 8 E731)
        return true_parameters[0] + t * true_parameters[1]

    # Generate data
    # Use a lot of data points - the approximations used only hold
    # asymptotically
    n_points = 1000
    times = np.linspace(0, 1, n_points)
    data = forward_model(times) + np.random.normal(0, true_parameters[2],
                                                   len(times))

    # Sensitivities are easy to write down (use pen and paper)
    sens1 = np.ones(n_points)
    sens2 = times

    # Compute inferred parameters (intercept, gradient)
    inferred_params = scipy.stats.linregress(x=times, y=data)
    inferred_params = np.array((inferred_params[1], inferred_params[0]))

    # Estimate sigma from the fit residuals
    sigma2 = sum((inferred_params[0] + inferred_params[1] * times - data)**
                 2) / (n_points - 1)
    print("observed sigma^2 vs true value\t{}, {}".format(
        sigma2, true_parameters[2]**2))

    sens = np.matrix(np.stack((sens1, sens2)))
    H = sens @ sens.T

    # Compute FIM
    FIM = H / (sigma2)

    # Compute observed covariance matrix
    cov = np.linalg.inv(FIM)
    print("Covariance matrix is {}".format(cov))
    print("Fisher information matrix is:\n{}".format(FIM))

    # Add samples to the ellipse
    n_samples = 1000
    samples = np.random.multivariate_normal(inferred_params, cov, n_samples)

    # Plot data against true model
    plt.plot(times, data, "o")
    plt.plot(times, forward_model(times))

    # plot the output from the sampled parameters
    for sample in samples:
        plt.plot(times, sample[0] + sample[1] * times, color="grey",
                 alpha=0.1)

    if args.plot:
        plt.show()
    else:
        plt.savefig(
            os.path.join(args.output, "simple_example", "synthetic_data"))

    # Plot 1 s.d ellipse
    # Does this plot make sense? What does it mean?
    fig, ax = cov_ellipse(cov, offset=inferred_params,
                          q=[0.75, 0.9, 0.95, 0.99])
    plt.xlabel("intercept")
    plt.ylabel("gradient")
    ax.plot(*samples.T, "x", color="grey", alpha=0.25)
    ax.legend()
    if args.plot:
        plt.show()
    else:
        plt.savefig(
            os.path.join(args.output, "simple_example",
                         "1sd_ellipse_parameter_dist"))
def main():
    """Render a scaffold comparison figure and save it as <out>.pdf.

    Draws scaffolds, a scale bar, alignments (from any of several formats),
    optional vertical marks, gene models and histograms onto one axis.
    """
    args = common.get_args()
    func_print_messages( )
    seqs = scf.input_scaffoldtsv( args.input )
    # Global layout/geometry object shared by all plot helpers.
    size=common.Size( seqs, args.margin_bw_scaffolds, args.xlim_max, args.alignment_height )
    #histograms = histogram.set_space( args.hist, seqs, size.histogram_height )
    size.set_histogram_space( seqs, args.hist )
    size.set_scaffold_layout( seqs, args.scaffold_layout )
    size.output_parameters()
    fig = plt.figure( figsize=size.figsize_inch )
    ax = fig.add_subplot(111)
    # Transparent figure background.
    fig.patch.set_alpha( 0.0 )
    func_set_axes( ax, size )
    scf.plot_scaffolds( ax, seqs, args.scaffold_font_size )
    ##plot scale bar
    scalebar = scf.Scalebar( size )
    scalebar.plot( ax )
    scalebar.output_parameters()
    ##plot alignment
    max_identity = args.max_identity
    # Each supported alignment format is paired with its plot function.
    input_formats = [ args.alignment, args.blastn, args.lastz, args.mummer ]
    func_plot_alignmment = [ alignment.plot_alignment4original,
                             alignment.plot_alignment4blastn,
                             alignment.plot_alignment4lastz,
                             alignment.plot_alignment4mummer ]
    valid_files = alignment.count_alignment_files( args )
    if valid_files == 0:
        pass
    else:
        min_identity = alignment.set_min_identity( args )
        ##set colormap
        heatmap = alignment.Colormap( min_identity, max_identity, args.colormap )
        heatmap.output_parameters()
        ##set and plot colormap legend
        heatmap_legend = alignment.Colorbox( size )
        heatmap_legend.plot( ax, heatmap )
        heatmap_legend.output_parameters()
        # Plot every existing file of every provided format.
        for files, func_plot in zip( input_formats, func_plot_alignmment ):
            if files is None:
                continue
            for fn in files:
                if not os.path.isfile( fn ):
                    continue
                func_plot( seqs, ax, heatmap, size, fn )
    ##plot mark_v
    if args.mark_v is not None:
        if os.path.isfile( args.mark_v ):
            mark_v.plot_mark_v( seqs, ax, size, args.mark_v )
    ##plot gene
    if args.gff3 is not None:
        for fn in args.gff3:
            if not os.path.isfile( fn ):
                continue
            gff.plot_genes( seqs, ax, size, fn )
    ##plot histogram
    histogram.plot_background( seqs, ax, size )
    if args.hist is not None:
        for fn in args.hist:
            if not os.path.isfile( fn ):
                continue
            histogram.plot_histogram( seqs, ax, size, fn )
    # Save the finished figure as a single-page PDF.
    pdf_file = args.out + '.pdf'
    pp = PdfPages( pdf_file )
    pp.savefig( fig, bbox_inches='tight' )
    pp.close()
def get_generic_signature(self, lang):
    """Return the generic (type-agnostic) signature(s) for this operator.

    Args:
        lang: target API flavour; one of 'c_base', 'cxx_base', 'cxx_adv'.

    Returns:
        - 'c_base': list of two '#define' strings (plain and '_e' variant).
        - 'cxx_base': one C++ template function declaration string.
        - 'cxx_adv': dict of declaration strings keyed by '1' (unroll 1),
          'N' (unroll N), optionally 'op1'/'opN' (operator overloads) and
          'dispatch' (tag-dispatch / non-closed overload).

    Raises:
        ValueError: on an unknown parameter code in self.params.
        Exception: if `lang` is not one of the three supported values.

    NOTE(review): self.params[0] is the return "type code", the rest are
    argument codes ('v' vector, 'l' logical, 's' scalar, 'p' int, ...);
    self.closed means input and output value types are the same -- inferred
    from usage here, confirm against the operator class definition.
    """
    if lang == 'c_base':
        # C has no templates: emit variadic-looking macro definitions.
        vas = common.get_args(len(self.params) - 1)
        args = vas + (', ' if vas != '' else '')
        # Non-closed operators (type conversions) take both type names.
        args += 'from_type, to_type' if not self.closed else 'type'
        return ['#define v{name}({args})'.format(name=self.name, args=args),
                '#define v{name}_e({args}, simd_ext)'. \
                format(name=self.name, args=args)]
    elif lang == 'cxx_base':
        def get_type(param, typename):
            # Map a parameter code to its C++ type spelling.
            if param == '_':
                return 'void'
            elif param == 'p':
                return 'int'
            elif param == 's':
                return typename
            elif param == '*':
                return '{}*'.format(typename)
            elif param == 'c*':
                return '{} const*'.format(typename)
            elif param == 'vi':
                return 'typename simd_traits<typename traits<{}>::itype,' \
                       ' NSIMD_SIMD>::simd_vector'.format(typename)
            elif param == 'l':
                return \
                'typename simd_traits<{}, NSIMD_SIMD>::simd_vectorl'. \
                format(typename)
            elif param.startswith('v'):
                return \
                'typename simd_traits<{}, NSIMD_SIMD>::simd_vector{}'. \
                format(typename, param[1:])
            else:
                raise ValueError("Unknown param '{}'".format(param))
        return_typ = get_type(self.params[0], 'T')
        args_list = common.enum(self.params[1:])
        if not self.closed :
            # Conversions are templated on both From (F) and To (T) types;
            # arguments are typed with F, the return with T.
            tmpl_args = 'NSIMD_CONCEPT_VALUE_TYPE F, ' \
                        'NSIMD_CONCEPT_VALUE_TYPE T'
            typename = 'F'
        else:
            tmpl_args = 'NSIMD_CONCEPT_VALUE_TYPE T'
            typename = 'T'
        temp = ', '.join(['{} a{}'.format(get_type(a[1], typename), a[0])
                          for a in args_list])
        temp += ', ' if temp != '' else ''
        if not self.closed:
            func_args = temp + 'F, T'
            # C++20 requires-clause constraining the F/T size relationship.
            if self.output_to == common.OUTPUT_TO_SAME_SIZE_TYPES:
                cxx20_require = \
                    'NSIMD_REQUIRES(sizeof_v<F> == sizeof_v<T>) '
            elif self.output_to == common.OUTPUT_TO_UP_TYPES:
                cxx20_require = \
                    'NSIMD_REQUIRES(2 * sizeof_v<F> == sizeof_v<T>) '
            else:
                cxx20_require = \
                    'NSIMD_REQUIRES(sizeof_v<F> == 2 * sizeof_v<T>) '
        else:
            func_args = temp + 'T'
            cxx20_require = ''
        return 'template <{tmpl_args}> {cxx20_require}{return_typ} ' \
               'NSIMD_VECTORCALL {name}({func_args});'. \
               format(return_typ=return_typ, tmpl_args=tmpl_args,
                      func_args=func_args, name=self.name,
                      cxx20_require=cxx20_require)
    elif lang == 'cxx_adv':
        def get_type(param, typename, N):
            # Same mapping as above but for the pack<T, N, SimdExt> API.
            if param == '_':
                return 'void'
            elif param == 'p':
                return 'int'
            elif param == 's':
                return typename
            elif param == '*':
                return '{}*'.format(typename)
            elif param == 'c*':
                return '{} const*'.format(typename)
            elif param == 'vi':
                return 'pack<typename traits<{}>::itype, {}, SimdExt>'. \
                       format(typename, N)
            elif param == 'l':
                return 'packl<{}, {}, SimdExt>'.format(typename, N)
            elif param.startswith('v'):
                return 'pack{}<{}, {}, SimdExt>'. \
                       format(param[1:], typename, N)
            else:
                raise ValueError("Unknown param '{}'".format(param))
        args_list = common.enum(self.params[1:])
        # Do we need tag dispatching on pack<>? e.g. len, set1 and load*
        inter = [i for i in ['v', 'l', 'vi', 'vx2', 'vx3', 'vx4'] \
                 if i in self.params[1:]]
        tag_dispatching = (inter == [])
        # Compute template arguments
        tmpl_args1 = ['NSIMD_CONCEPT_VALUE_TYPE T',
                      'NSIMD_CONCEPT_SIMD_EXT SimdExt']
        tmpl_argsN = ['NSIMD_CONCEPT_VALUE_TYPE T', 'int N',
                      'NSIMD_CONCEPT_SIMD_EXT SimdExt']
        def get_PACK(arg):
            # Parameter code -> NSIMD_CONCEPT_* concept name suffix.
            if arg == 'l':
                return 'PACKL'
            elif arg == 'v':
                return 'PACK'
            else:
                return 'PACK{}'.format(arg[1:].upper())
        if not self.closed:
            # Non-closed operators get the destination pack type as the
            # first template parameter.
            tmpl = 'NSIMD_CONCEPT_{} ToPackType'. \
                   format(get_PACK(self.params[0]))
            tmpl_args1 = [tmpl] + tmpl_args1
            tmpl_argsN = [tmpl] + tmpl_argsN
        tmpl_args1 = ', '.join(tmpl_args1)
        tmpl_argsN = ', '.join(tmpl_argsN)
        # Compute function arguments
        def arg_type(arg, typename, N):
            # Pack-like arguments are passed by const reference.
            if arg in ['v', 'vi', 'vx2', 'vx3', 'vx4', 'l']:
                return '{} const&'.format(get_type(arg, typename, N))
            else:
                return get_type(arg, typename, N)
        args1 = ['{} a{}'.format(arg_type(i[1], 'T', '1'), i[0]) \
                 for i in args_list]
        argsN = ['{} a{}'.format(arg_type(i[1], 'T', 'N'), i[0]) \
                 for i in args_list]
        # Arguments without tag dispatching on pack
        other_argsN = ', '.join(argsN)
        # If we need tag dispatching, then the first argument type
        # is the output type:
        # 1. If not closed, then the output type is ToPackType
        # 2. If closed, then the output type is pack<T, N, SimdExt>
        if not self.closed:
            args1 = ['ToPackType const&'] + args1
            argsN = ['ToPackType const&'] + argsN
        elif tag_dispatching:
            args1 = [arg_type(self.params[0], 'T', '1')] + args1
            argsN = [arg_type(self.params[0], 'T', 'N')] + argsN
        args1 = ', '.join(args1)
        argsN = ', '.join(argsN)
        # Compute return type
        if not self.closed:
            ret1 = 'ToPackType'
            retN = 'ToPackType'
        else:
            ret1 = get_type(self.params[0], 'T', '1')
            retN = get_type(self.params[0], 'T', 'N')
        # For non closed operators that need tag dispatching we have a
        # require clause
        cxx20_require = ''
        if not self.closed:
            # '{{}}' keeps one placeholder open: the unroll factor is
            # filled in later with cxx20_require.format('1'/'N').
            tmpl = 'NSIMD_REQUIRES((' \
                   '{}sizeof_v<typename ToPackType::value_type> == ' \
                   '{}sizeof_v<T> && ' \
                   'ToPackType::unroll == {{}} && '\
                   'std::is_same_v<typename ToPackType::simd_ext, SimdExt>))'
            if self.output_to == common.OUTPUT_TO_SAME_SIZE_TYPES:
                cxx20_require = tmpl.format('', '')
            elif self.output_to == common.OUTPUT_TO_UP_TYPES:
                cxx20_require = tmpl.format('', '2 * ')
            else:
                cxx20_require = tmpl.format('2 * ', '')
        ret = { \
            '1': 'template <{tmpl_args1}> {cxx20_require}{ret1} ' \
                 '{cxx_name}({args1});'. \
                 format(tmpl_args1=tmpl_args1,
                        cxx20_require=cxx20_require.format('1'),
                        ret1=ret1, args1=args1, cxx_name=self.name),
            'N': 'template <{tmpl_argsN}> {cxx20_require}{retN} ' \
                 '{cxx_name}({argsN});'. \
                 format(tmpl_argsN=tmpl_argsN,
                        cxx20_require=cxx20_require.format('N'),
                        retN=retN, argsN=argsN, cxx_name=self.name)
        }
        if self.cxx_operator:
            # Operators with a C++ operator spelling (e.g. operator+) also
            # get overload declarations.
            ret.update({ \
                'op1': '''template <{tmpl_args1}> {ret1} operator{cxx_name}({args1});'''. \
                       format(tmpl_args1=tmpl_args1, ret1=ret1,
                              args1=args1, cxx_name=self.cxx_operator),
                'opN': '''template <{tmpl_argsN}> {retN} operator{cxx_name}({argsN});'''. \
                       format(tmpl_argsN=tmpl_argsN, retN=retN,
                              argsN=argsN, cxx_name=self.cxx_operator)
            })
        if not self.closed:
            ret['dispatch'] = \
                'template <{tmpl_argsN}> {cxx20_require}{retN} ' \
                '{cxx_name}({other_argsN});'. \
                format(tmpl_argsN=tmpl_argsN,
                       cxx20_require=cxx20_require.format('N'),
                       other_argsN=other_argsN, retN=retN,
                       cxx_name=self.name)
        elif tag_dispatching:
            # Only constrain on T when a scalar/pointer argument actually
            # mentions it.
            if [i for i in ['s', '*', 'c*'] if i in self.params[1:]] == []:
                tmpl_T = ''
                requires = ''
            else:
                tmpl_T = ', NSIMD_CONCEPT_VALUE_TYPE T'
                requires = 'NSIMD_REQUIRES((' \
                           'std::is_same_v<typename SimdVector::value_type, T>))'
            ret['dispatch'] = \
                '''template <NSIMD_CONCEPT_{PACK} SimdVector{tmpl_T}>{requires} SimdVector {cxx_name}({other_argsN});'''.format(
                    PACK=get_PACK(self.params[0]), requires=requires,
                    other_argsN=other_argsN, cxx_name=self.name,
                    tmpl_T=tmpl_T)
        return ret
    else:
        raise Exception('Lang must be one of c_base, cxx_base, cxx_adv')
import pyrogue import pyrogue.utilities.fileio import rogue.interfaces.stream import pysmurf.core.devices import common from mymodule.transmitters._MyTransmitter import MyTransmitter # Main body if __name__ == "__main__": # Read Arguments args = common.get_args() # Import the root device after the python path is updated from pysmurf.core.roots.CmbEth import CmbEth if not args['ip_addr']: sys.exit("ERROR: Must specify an IP address for ethernet base communication devices.") common.verify_ip(args) common.ping_fpga(args) # The PCIeCard object will take care of setting up the PCIe card (if present) with pysmurf.core.devices.PcieCard( lane = args['pcie_rssi_lane'], comm_type = "eth-rssi-interleaved", ip_addr = args['ip_addr'], dev_rssi = args['pcie_dev_rssi'],
project_name = "image-norm" max_rounds = 1500 C = 10 / 500 NC = 500 E = 1 B = 20 is_iid = False server_lr = 0.0316 client_lr = 0.0316 server_opt = "Yogi" client_opt = "SGD" client_opt_strategy = "reinit" # image_norm = "tflike" # TODO a paraméterek helytelen nevére nem adott hibát s_opt_args = common.get_args(server_opt) config = TorchFederatedLearnerCIFAR100Config( BREAK_ROUND=1500, CLIENT_LEARNING_RATE=client_lr, CLIENT_OPT=client_opt, # CLIENT_OPT_ARGS=common.get_args(client_opt), CLIENT_OPT_L2=1e-4, CLIENT_OPT_STRATEGY=client_opt_strategy, SERVER_OPT=server_opt, SERVER_OPT_ARGS=s_opt_args, SERVER_LEARNING_RATE=server_lr, IS_IID_DATA=is_iid, BATCH_SIZE=B, CLIENT_FRACTION=C, N_CLIENTS=NC, N_EPOCH_PER_CLIENT=E,
def get_impl(operator, totyp, typ):
    """Return the C body (a string) of the scalar implementation of
    `operator` for value type `typ` (`totyp` is the destination type for
    conversions/reinterprets).

    The emitted code uses {in0}/{in1}/{in2} placeholders resolved through
    the module-global `fmtspec`. f16 has no native C arithmetic, so f16
    branches round-trip through f32 via nsimd_f16_to_f32/nsimd_f32_to_f16.
    """
    global fmtspec
    # fmtspec is module-global so the helper emitters (opnum, opbit, cmp,
    # shift, ...) can presumably reuse the same substitutions -- TODO
    # confirm against those helpers.
    fmtspec = {
        'in0': common.in0,
        'in1': common.in1,
        'in2': common.in2,
        'typ': typ,
        'totyp': totyp,
        'typnbits': typ[1:]  # e.g. 'f32' -> '32'
    }
    if operator.name == 'trunc':
        # trunc is the identity on integers; for floats emit a C89-safe
        # fallback (floor for non-negatives, ceil otherwise).
        if typ in common.iutypes:
            return 'return {in0};'.format(**fmtspec)
        elif typ == 'f16':
            c89_code = \
            '''f32 buf = nsimd_f16_to_f32({in0}); return nsimd_f32_to_f16(buf >= 0.0f ? nsimd_scalar_floor_f32(buf) : nsimd_scalar_ceil_f32(buf));'''. \
            format(**fmtspec)
        else:
            c89_code = \
            '''return {in0} >= 0.0{f} ? nsimd_scalar_floor_{typ}({in0}) : nsimd_scalar_ceil_{typ}({in0});'''. \
            format(f='f' if typ == 'f32' else '', **fmtspec)
        return libm_opn('trunc', 1, typ, True, c89_code)
    if operator.name == 'abs':
        if typ == 'f16':
            return '''f32 tmp = nsimd_f16_to_f32({in0}); return nsimd_f32_to_f16(tmp >= 0.0f ? tmp : -tmp);'''. \
                   format(**fmtspec)
        elif typ in common.utypes:
            # abs is the identity on unsigned types.
            return 'return {in0};'.format(**fmtspec)
        else:
            return 'return ({typ})({in0} >= ({typ})0 ? {in0} : -{in0});'. \
                   format(**fmtspec)
    if operator.name in ['min', 'max']:
        op = '<' if operator.name == 'min' else '>'
        if typ == 'f16':
            return '''f32 in0 = nsimd_f16_to_f32({in0}); f32 in1 = nsimd_f16_to_f32({in1}); return nsimd_f32_to_f16(in0 {op} in1 ? in0 : in1);'''. \
                   format(op=op, **fmtspec)
        else:
            return 'return {in0} {op} {in1} ? {in0} : {in1};'. \
                   format(op=op, **fmtspec)
    if operator.name == 'to_logical':
        # Floats are compared through their unsigned bit pattern so that
        # any nonzero bit pattern maps to true.
        if typ in common.iutypes:
            return 'return {in0} != ({typ})0;'.format(**fmtspec)
        else:
            return '''return nsimd_scalar_reinterpret_u{typnbits}_{typ}( {in0}) != (u{typnbits})0;'''.format(**fmtspec)
    if operator.name == 'to_mask':
        # All-ones (-1) for true, all-zeros for false, reinterpreted into
        # the destination type when it is not unsigned.
        if typ in common.utypes:
            return 'return ({typ})({in0} ? -1 : 0);'.format(**fmtspec)
        else:
            return '''return nsimd_scalar_reinterpret_{typ}_u{typnbits}(( u{typnbits})({in0} ? -1 : 0));'''. \
                   format(**fmtspec)
    if operator.name == 'round_to_even':
        return round_to_even(typ)
    if operator.name in ['floor', 'ceil', 'sqrt']:
        # floor/ceil are identities on integers; sqrt always goes to libm.
        if typ in common.iutypes and operator.name != 'sqrt':
            return 'return {in0};'.format(**fmtspec)
        return libm_opn(operator.name, 1, typ, False, '')
    if operator.name == 'fma':
        if typ in common.iutypes:
            return 'return ({typ})({in0} * {in1} + {in2});'.format(**fmtspec)
        else:
            if typ == 'f16':
                c89_code = 'return nsimd_f32_to_f16(nsimd_f16_to_f32({in0}) ' \
                           '* nsimd_f16_to_f32({in1}) ' \
                           '+ nsimd_f16_to_f32({in2}));'.format(**fmtspec)
            else:
                c89_code = 'return {in0} * {in1} + {in2};'.format(**fmtspec)
            return libm_opn(operator.name, 3, typ, False, c89_code)
    if operator.name in ['fnma', 'fms', 'fnms']:
        # fma variants: 'n' negates the product, 's' subtracts the addend.
        neg = '-' if operator.name in ['fnms', 'fnma'] else ''
        op = '-' if operator.name in ['fms', 'fnms'] else '+'
        if typ in common.iutypes:
            return 'return ({typ})(({neg}{in0}) * {in1} {op} {in2});'. \
                   format(neg=neg, op=op, **fmtspec)
        else:
            typ2 = 'f32' if typ == 'f16' else typ
            return opnum(
                'nsimd_scalar_fma_{typ2}({neg}{{in0}}, {{in1}}, {op}{{in2}})'. \
                format(typ2=typ2, neg=neg, op=op, **fmtspec), typ)
    f = 'f' if typ in ['f16', 'f32'] else ''
    typ2 = 'f32' if typ == 'f16' else typ
    if operator.src:
        # Operators backed by Sleef: forward to the Sleef scalar entry
        # point, converting each f16 argument through f32.
        if typ == 'f16':
            return \
            '''return nsimd_f32_to_f16( nsimd_sleef_{op_name}_scalar_f32({vas}));'''. \
            format(op_name=operator.name,
                   vas=', '.join(['nsimd_f16_to_f32({})'. \
                                  format(common.get_arg(i)) \
                                  for i in range(len(operator.params[1:]))]),
                   **fmtspec)
        else:
            return 'return nsimd_sleef_{op_name}_scalar_{typ}({vas});'. \
                   format(op_name=operator.name,
                          vas=common.get_args(len(operator.params[1:])),
                          **fmtspec)
    # Remaining operators are simple one-liners: dispatch through a table
    # of thunks (lambdas so only the selected emitter runs).
    func = {
        'orb': lambda: opbit('{in0} | {in1}', typ),
        'andb': lambda: opbit('{in0} & {in1}', typ),
        'andnotb': lambda: opbit('{in0} & (~{in1})', typ),
        'notb': lambda: opbit('~{in0}', typ),
        'xorb': lambda: opbit('{in0} ^ {in1}', typ),
        'add': lambda: opnum('{in0} + {in1}', typ),
        'sub': lambda: opnum('{in0} - {in1}', typ),
        'mul': lambda: opnum('{in0} * {in1}', typ),
        'div': lambda: opnum('{in0} / {in1}', typ),
        'neg': lambda: opnum('-{in0}', typ),
        'lt': lambda: cmp('{in0} < {in1}', typ),
        'gt': lambda: cmp('{in0} > {in1}', typ),
        'le': lambda: cmp('{in0} <= {in1}', typ),
        'ge': lambda: cmp('{in0} >= {in1}', typ),
        'ne': lambda: cmp('{in0} != {in1}', typ),
        'eq': lambda: cmp('{in0} == {in1}', typ),
        'andl': lambda: 'return {in0} && {in1};'.format(**fmtspec),
        'orl': lambda: 'return {in0} || {in1};'.format(**fmtspec),
        'xorl': lambda: 'return {in0} ^ {in1};'.format(**fmtspec),
        'andnotl': lambda: 'return {in0} && (!{in1});'.format(**fmtspec),
        'notl': lambda: 'return !{in0};'.format(**fmtspec),
        'shl': lambda: shift('shl', typ),
        'shr': lambda: shift('shr', typ),
        'shra': lambda: shift('shra', typ),
        'reinterpret': lambda: reinterpret(totyp, typ),
        'cvt': lambda: cvt(totyp, typ),
        'adds': lambda: adds(typ),
        'subs': lambda: subs(typ),
        'rec': lambda: opnum('1.0{f} / {{in0}}'.format(f=f), typ),
        'rec8': lambda: opnum('1.0{f} / {{in0}}'.format(f=f), typ),
        'rec11': lambda: opnum('1.0{f} / {{in0}}'.format(f=f), typ),
        'rsqrt': lambda: opnum('1.0{f} / nsimd_scalar_sqrt_{typ2}({{in0}})'. \
                               format(f=f, typ2=typ2), typ),
        'rsqrt8': lambda: opnum('1.0{f} / nsimd_scalar_sqrt_{typ2}({{in0}})'. \
                                format(f=f, typ2=typ2), typ),
        'rsqrt11': lambda: opnum('1.0{f} / nsimd_scalar_sqrt_{typ2}({{in0}})'. \
                                 format(f=f, typ2=typ2), typ)
    }
    return func[operator.name]()
im = ax.imshow(data_v_vals_imaginary, cmap='cool', interpolation='nearest') # Create colorbar cbar_kw = {} cbarlabel = "" cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw) cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom") plt.title('heatmap with imaginary') plt.savefig('log/{}_valuemap_{}_imaginary.png'.format( args.env, timestep)) plt.clf() plt.close() if __name__ == '__main__': # Getting arguments from command line + defaults args = get_args() # creates copy of args for the real coordinates # this class compares space generated by neuralt network (in this case Bbox) with real coordinates) env = make_env(args) load_field_parameters(args) assert args.play_path is not None player = Player(args) player_imaginary = Player(args, direct_playpath=args.play_path_im_h) create_heatmap_qvalues(args=args, env=env, player=player, player_imaginary=player_imaginary)
model = "CNN" E = 1 project_name = f"{model}{NC}c{E}e{max_rounds}r{n_clients_per_round}f-{server_opt}-{client_opt_strategy[0]}-{client_opt}-scf" client_lr_lg = -0.5 server_lr_lg = 0 for client_lr_lg in np.arange(-1.5, 1.0, 0.5): client_lr = 10**client_lr_lg for server_lr_lg in np.arange(-1, 1.5, 0.5): server_lr = 10**server_lr_lg config = TorchFederatedLearnerEMNISTConfig( CLIENT_LEARNING_RATE=client_lr, CLIENT_OPT=common.get_name(client_opt), CLIENT_OPT_ARGS=common.get_args(client_opt), # CLIENT_OPT_L2=1e-4, CLIENT_OPT_STRATEGY=client_opt_strategy, SERVER_OPT=common.get_name(server_opt), SERVER_OPT_ARGS=common.get_args(server_opt), SERVER_LEARNING_RATE=server_lr, IS_IID_DATA=is_iid, BATCH_SIZE=B, CLIENT_FRACTION=C, N_CLIENTS=NC, N_EPOCH_PER_CLIENT=E, MAX_ROUNDS=max_rounds, MODEL=model, SCAFFOLD=True) config_technical = TorchFederatedLearnerTechnicalConfig( BREAK_ROUND=300,
from rabin import Rabin
from common import get_args, sys

## sample file
import os

# Path to the bundled sample text, one directory above this script.
SAMPLE_FILE = os.path.join(
    os.path.abspath(os.path.dirname(__file__)), '..', 'sample.txt')


if __name__ == '__main__':
    # Command line supplies the text to scan and the pattern to look for.
    haystack, needle = get_args(sys.argv[1:])

    matcher = Rabin(needle, haystack)
    matcher.search(use_rabin_fingerprint=True)

    # Report every match position, or a miss message when there are none.
    positions = matcher.result
    if positions:
        print("Pattern '{}' found at positions {}".format(needle, positions))
    else:
        print("No match found for pattern '{}'".format(needle))