def create_character_data_dict(self):
    data = {
        "CharacterName": names.get_first_name(),
        "XP": "0",
        "ProfBonus": sheet_maths.plus_or_minus(self.clss.proficiency_bonus),
        "Alignment": random.choice(random.choice(self.sheet_stuff["alignment"])),
        "Initiative": sheet_maths.plus_or_minus(self.mods["DEXmod"]),
        "HPMax": self.clss.hit_dice + self.mods["CONmod"],
        "HPCurrent": self.clss.hit_dice + self.mods["CONmod"],
        "Equipment": ", ".join(self.bg.equipment + self.clss.equipment),
        "ProficienciesLang": self.final_languages + "\n\n" + self.final_profs,
        "Features and Traits": utils.dict_to_string(self.race.features) + utils.dict_to_string(self.clss.features),
        "Passive": str(self.pp),
        "AC": self.ac,
    }

    # Checking proficient skill boxes
    for skill in self.final_skills:
        data[self.sheet_stuff["skills"][skill]["checkbox"]] = "Yes"

    for skill in self.sheet_stuff["skills"]:
        if skill in self.final_skills:
            data[skill] = sheet_maths.plus_or_minus(
                self.mods[self.sheet_stuff["skills"][skill]["mod"]] + self.sheet_stuff["prof"])
        else:
            data[skill] = sheet_maths.plus_or_minus(
                self.mods[self.sheet_stuff["skills"][skill]["mod"]])

    data.update(self.stats)
    data.update(sheet_maths.plus_or_minus_dict(self.mods))
    data.update(sheet_maths.plus_or_minus_dict(self.saves))
    data.update(self.race.race_to_dict())
    data.update(self.bg.bg_to_dict())
    data.update(self.clss.class_to_dict())
    data.update(self.weapons)
    return data
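# Hedged illustration only: minimal sketches of the helpers used above, inferred
# from their call sites. The real sheet_maths and utils modules may differ; these
# are assumptions, not the project's implementations.

def plus_or_minus(value):
    # Format a numeric modifier with an explicit sign, e.g. 2 -> "+2", -1 -> "-1".
    return "{:+d}".format(int(value))

def dict_to_string(d):
    # Render a feature dict as "name: description" lines for a sheet text field.
    return "\n".join("{}: {}".format(k, v) for k, v in d.items())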
def build_kernel_options(self, system, profile, distro, image, arch, kickstart_path):
    """
    Builds the full kernel options line.
    """
    management_interface = None
    if system is not None:
        blended = utils.blender(self.api, False, system)
        # find the first management interface
        try:
            for intf in system.interfaces.keys():
                if system.interfaces[intf]["management"]:
                    management_interface = intf
                    break
        except:
            # just skip this then
            pass
    elif profile is not None:
        blended = utils.blender(self.api, False, profile)
    else:
        blended = utils.blender(self.api, False, image)

    append_line = ""
    kopts = blended.get("kernel_options", dict())

    # Since the network needs to be configured again when the kernel boots (it was
    # already configured for netboot) and we choose to do it dynamically, we need to
    # set 'ksdevice' to one of the interfaces' MAC addresses on ppc systems.
    # ksdevice=bootif is not useful in yaboot, as the "ipappend" line is a PXE feature.
    if system and arch and "ppc" in arch:
        for intf in system.interfaces.keys():
            # use the first interface with a defined IP and MAC, since these are
            # required fields in a DHCP entry
            mac_address = system.interfaces[intf]['mac_address']
            ip_address = system.interfaces[intf]['ip_address']
            if mac_address and ip_address:
                kopts['ksdevice'] = mac_address
                break

    # support additional initrd= entries in kernel options.
    if "initrd" in kopts:
        append_line = ",%s" % kopts.pop("initrd")
    hkopts = utils.dict_to_string(kopts)
    append_line = "%s %s" % (append_line, hkopts)

    # kickstart path rewriting (get URLs for local files)
    if kickstart_path is not None and kickstart_path != "":
        # FIXME: need to make shorter rewrite rules for these URLs
        try:
            ipaddress = socket.gethostbyname_ex(blended["http_server"])[2][0]
        except socket.gaierror:
            ipaddress = blended["http_server"]
        if system is not None and kickstart_path.startswith("/"):
            kickstart_path = "http://%s/cblr/svc/op/ks/system/%s" % (ipaddress, system.name)
        elif kickstart_path.startswith("/"):
            kickstart_path = "http://%s/cblr/svc/op/ks/profile/%s" % (ipaddress, profile.name)

        if distro.breed is None or distro.breed == "redhat":
            append_line = "%s ks=%s" % (append_line, kickstart_path)
            gpxe = blended["enable_gpxe"]
            if gpxe:
                append_line = append_line.replace('ksdevice=bootif', 'ksdevice=${net0/mac}')
        elif distro.breed == "suse":
            append_line = "%s autoyast=%s" % (append_line, kickstart_path)
        elif distro.breed == "debian" or distro.breed == "ubuntu":
            append_line = "%s auto-install/enable=true priority=critical url=%s" % (append_line, kickstart_path)
            if management_interface:
                append_line += " netcfg/choose_interface=%s" % management_interface
        elif distro.breed == "freebsd":
            append_line = "%s ks=%s" % (append_line, kickstart_path)

            # rework kernel options for debian distros
            translations = {'ksdevice': "interface", 'lang': "locale"}
            for k, v in translations.iteritems():
                append_line = append_line.replace("%s=" % k, "%s=" % v)

            # interface=bootif causes a failure
            append_line = append_line.replace("interface=bootif", "")
        elif distro.breed == "vmware":
            if distro.os_version.find("esxi") != -1:
                # ESXi is very picky; it's easier just to redo the entire
                # append line here
                append_line = " ks=%s %s" % (kickstart_path, hkopts)
                # ESXi likes even fewer options, so we remove them too
                append_line = append_line.replace("kssendmac", "")
            else:
                append_line = "%s vmkopts=debugLogToSerial:1 mem=512M ks=%s" % \
                    (append_line, kickstart_path)
            # interface=bootif causes a failure
            append_line = append_line.replace("ksdevice=bootif", "")
        elif distro.breed == "xen":
            if distro.os_version.find("xenserver620") != -1:
                img_path = os.path.join("/images", distro.name)
                append_line = "append %s/xen.gz dom0_max_vcpus=2 dom0_mem=752M com1=115200,8n1 console=com1,vga --- %s/vmlinuz xencons=hvc console=hvc0 console=tty0 install answerfile=%s --- %s/install.img" % (img_path, img_path, kickstart_path, img_path)
                return append_line

    if distro is not None and (distro.breed in ["debian", "ubuntu"]):
        # Hostname is required as a parameter, the one in the preseed is
        # not respected, so calculate if we have one here.
        # We're trying: first part of FQDN in hostname field, then system
        # name, then profile name.
        # In Ubuntu, this is at least used for the volume group name when
        # using LVM.
        domain = "local.lan"
        if system is not None:
            if system.hostname is not None and system.hostname != "":
                # If this is a FQDN, grab the first bit
                hostname = system.hostname.split(".")[0]
                _domain = system.hostname.split(".")[1:]
                if _domain:
                    domain = ".".join(_domain)
            else:
                hostname = system.name
        else:
            # Ubuntu, at the very least, does not like having underscores
            # in the hostname.
            # FIXME: Really this should remove all characters that are
            # forbidden in hostnames
            hostname = profile.name.replace("_", "")

        # At least for Debian deployments configured for DHCP networking
        # these values are not used, but specifying them here avoids questions
        append_line = "%s hostname=%s" % (append_line, hostname)
        append_line = "%s domain=%s" % (append_line, domain)

        # A similar issue exists with the suite name, as the installer requires
        # the existence of "stable" in the dists directory
        append_line = "%s suite=%s" % (append_line, distro.os_version)
    elif distro is not None and distro.breed == 'nexenta':
        append_line = "-B iso_nfs_path=%s:/var/www/cobbler/links/%s,auto_install=1" % (blended['next_server'], distro.name)

    # append necessary kernel args for arm architectures
    if arch is not None and arch.startswith("arm"):
        append_line = "%s fixrtc vram=48M omapfb.vram=0:24M" % append_line

    # do variable substitution on the append line
    # promote all of the ksmeta variables
    if "ks_meta" in blended:
        blended.update(blended["ks_meta"])
    append_line = self.templar.render(append_line, utils.flatten(blended), None)

    # FIXME - the append_line length limit is architecture specific
    if len(append_line) >= 255:
        self.logger.warning("warning: kernel option length exceeds 255")

    return append_line
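# Hedged illustration only: utils.dict_to_string is assumed here to flatten the
# kernel-options dict into a space-separated "key=value" string (bare keys for
# empty values), matching how hkopts is appended above. This is a sketch, not
# Cobbler's actual implementation.

def dict_to_string(opts):
    parts = []
    for key, value in opts.items():
        if value is None or value == "":
            parts.append(str(key))                            # flag-style option, e.g. "text"
        elif isinstance(value, (list, tuple)):
            parts.extend("%s=%s" % (key, v) for v in value)   # repeated option
        else:
            parts.append("%s=%s" % (key, value))
    return " ".join(parts)

# Example: {"ksdevice": "aa:bb:cc:dd:ee:ff", "text": ""} -> "ksdevice=aa:bb:cc:dd:ee:ff text"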
def main():
    work_root = os.environ['WORK']
    config = get_config(default_config)
    print_with_rank(str(config))

    # Read hyper parameters
    group_count = config['number_of_reservoirs']
    feature_count = config['number_of_features']
    lsp = config['overlap_size']
    predict_length = config['prediction_size']
    train_length = config['training_size']
    approx_res_size = config['reservoir_size']
    sigma = config['sigma']
    radius = config['radius']
    beta = config['beta']
    degree = config['degree']

    # Check preconditions for running
    if feature_count % group_count != 0:
        comm.Abort()
        return
    if group_count % size != 0:
        comm.Abort()
        return

    if rank == master_node_rank:
        all_data = load_data(work_root, 'data/QG_everydt_avgu.csv')
    else:
        all_data = None

    for shift in shifts:
        config['shift'] = shift
        if rank == master_node_rank:
            data = all_data[:, shift:train_length + shift]
        else:
            data = None

        model = ESNParallel(group_count, feature_count, lsp, train_length,
                            predict_length, approx_res_size, radius, sigma,
                            random_state=42, beta=beta,
                            degree=degree).fit_reservoir(data)

        for beta in beta_optimization:
            model._beta = beta
            output = model.fit_output().predict()

            if rank == master_node_rank:
                config['beta'] = beta
                shift_folder = dict_to_string(
                    {k: v for k, v in config.items() if k != 'shift'})
                directory = os.path.join(work_root, 'results/beta_shift_results',
                                         shift_folder)
                if not os.path.exists(directory):
                    os.makedirs(directory)
                result_path = os.path.join(
                    directory, 'data=QG-' + dict_to_string(config) + '.txt')
                np.savetxt(result_path, output)
                print_with_rank("Saved to " + result_path)
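# Hedged illustration only: in these scripts dict_to_string appears to serialise a
# config/params dict into a "key=value" string used for folder and file names. A
# minimal sketch of what such a helper might look like; the real one may differ.

def dict_to_string(d):
    # Stable ordering so the same config always maps to the same path.
    return '-'.join('{}={}'.format(k, v) for k, v in sorted(d.items()))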
def main():
    param_grid = {
        'group_count': [11],
        'feature_count': [88],
        'lsp': [7],
        'train_length': [100000],
        'predict_length': [1000],
        'approx_res_size': [1000],
        'radius': list(np.linspace(0.0001, 1, endpoint=False, num=1000)),
        'sigma': list(np.linspace(0.0001, 1, num=1000)),
        'random_state': [42],
        'beta': [0.003],
        'degree': [7],
        'alpha': list(np.linspace(0.0001, 1, num=1000)),
    }
    shifts = list(
        range(0, param_grid['predict_length'][0] * shift_count,
              param_grid['predict_length'][0]))

    if rank == master_node_rank:
        work_root = os.environ['WORK']
        all_data = load_data(work_root, 'data/QG_everydt_avgu.csv')
    else:
        all_data = None
        work_root = None

    max_evals = 500000
    for i in range(max_evals):
        if rank == master_node_rank:
            params = {k: random.sample(v, 1)[0] for k, v in param_grid.items()}
            print_with_rank(str(params))
        else:
            params = None
        params = comm.bcast(params, master_node_rank)

        for shift in shifts:
            if rank == master_node_rank:
                data = all_data[:, shift:params['train_length'] + shift]
            else:
                data = None

            output = ESNParallel(**params).fit(data).predict()

            if rank == master_node_rank:
                params['shift'] = shift
                shift_folder = dict_to_string(
                    {k: v for k, v in params.items() if k != 'shift'})
                directory = os.path.join(work_root, 'results/random_shift_results',
                                         shift_folder)
                if not os.path.exists(directory):
                    os.makedirs(directory)
                result_path = os.path.join(
                    directory, 'data=QG-' + dict_to_string(params) + '.txt')
                np.savetxt(result_path, output)
                print_with_rank("Saved to " + result_path)
                del params['shift']
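# Hedged illustration only: the module-level MPI globals (comm, rank, size,
# master_node_rank) and print_with_rank are not shown above. A minimal sketch of
# how they might be defined, assuming mpi4py; the real module may differ.

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
master_node_rank = 0

def print_with_rank(message):
    # Prefix messages with the MPI rank so interleaved worker output stays attributable.
    print("[rank {}/{}] {}".format(rank, size, message), flush=True)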
def run_graph(
    self,
    workflow_file,
    full_result,
    comment,
    main,
    graph_args,
    disable_inmemory_cache,
    disable_file_cache,
    frozens_id,
    create_frozens,
    use_frozens,
    use_frozen_only_if_exists,
    cleanup,
    perfomance_logging,
):
    self.workflow_file = workflow_file
    self.comment = comment

    start_time = time.time()

    with open(workflow_file) as f:
        self.graph = f.read()
    self.db_register()

    message_delay = 60 * float(
        config.get('cuber', 'message_delay', fallback=3))

    job_description = '{}; {}'.format(workflow_file, self.comment)
    try:
        cube.Cube.checkpoints_dir = self.checkpoints_dir
        logging.info('Checkpoints dir: {}'.format(cube.Cube.checkpoints_dir))
        wf = workflow.Workflow(
            workflow_file,
            main=main,
            graph_args=graph_args,
            frozens_dir=self.frozens_dir,
            frozens_id=frozens_id,
            create_frozens=create_frozens,
            use_frozens=use_frozens,
            use_frozen_only_if_exists=use_frozen_only_if_exists,
        )
        self.db_update_status('running')
        data = wf.run(
            disable_inmemory_cache=disable_inmemory_cache,
            disable_file_cache=disable_file_cache,
            cleanup=cleanup,
            perfomance_logging=perfomance_logging,
        )

        res = utils.dict_to_string(data, full=full_result)
        if time.time() - start_time >= message_delay:
            logging.critical(
                'Calculation is done: {} (graph id: {})\n{}'.format(
                    job_description, self.db_id, res))
        else:
            logging.info(
                'Calculation is done: {} (graph id: {})\n{}'.format(
                    job_description, self.db_id, res))
        self.db_save_result(res)
        self.db_update_status('done')
    except KeyboardInterrupt:
        if time.time() - start_time >= message_delay:
            logging.critical(
                'Calculation is cancelled: {} (graph id: {})'.format(
                    job_description, self.db_id))
        else:
            logging.error(
                'Calculation is cancelled: {} (graph id: {})'.format(
                    job_description, self.db_id))
        self.db_save_result('cancelled')
        self.db_update_status('cancelled')
    except:
        import traceback
        traceback.print_exc()
        if time.time() - start_time >= message_delay:
            logging.critical(
                'Calculation failed: {} (graph id: {})'.format(
                    job_description, self.db_id))
        else:
            logging.error(
                'Calculation failed: {} (graph id: {})'.format(
                    job_description, self.db_id))
        self.db_update_status('failed')
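# Hedged illustration only: here utils.dict_to_string(data, full=...) is assumed
# to render the workflow result dict one "key: value" per line, truncating long
# values unless full=True. Sketch under those assumptions, not cuber's actual code.

def dict_to_string(data, full=False, limit=200):
    lines = []
    for key, value in data.items():
        text = str(value)
        if not full and len(text) > limit:
            text = text[:limit] + '...'
        lines.append('{}: {}'.format(key, text))
    return '\n'.join(lines)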
def __run_graph(self, graph_, disable_inmemory_cache, disable_file_cache,
                cleanup, perfomance_logging):
    '''
        TODO: improve exceptions for incorrect graphs
    '''
    logger.debug('Graph to do: {}'.format(graph_))
    graph_ = self.__fold_graph(graph_)
    graph_descriptor = graph_['name'] if 'name' in graph_ else str(graph_)
    graph_id = graph_['name'] if 'name' in graph_ else utils.universal_hash(graph_)

    for key in {'module', 'class'}:
        if key not in graph_:
            raise GraphErrorSpecifiedSubgraph(
                'Cube description must have {} parameter.'.format(key),
                subgraph=graph_descriptor)

    for key in graph_.keys():
        graph_possible_params = {
            'attrs', 'deps', 'class', 'module', 'comment', 'name', 'frozen',
            'disable_inmemory_cache', 'disable_file_cache'
        }
        if key not in graph_possible_params:
            raise GraphErrorSpecifiedSubgraph(
                'Cube description has param {} that is not allowed. Check for typos. Possible values: {}'
                .format(key, graph_possible_params),
                subgraph=graph_descriptor)

    def get_frozen_path():
        frozen_path = os.path.join(self.frozens_dir, self.frozens_id,
                                   '{}.pkl'.format(graph_id))
        frozen_path_dir = os.path.join(self.frozens_dir, self.frozens_id)
        logger.info('Frozen path: {}'.format(frozen_path))
        return frozen_path, frozen_path_dir

    if utils.parse_bool(graph_.get('frozen', 'false')) and self.use_frozens and \
            not os.path.isfile(get_frozen_path()[0]):
        if not self.use_frozen_only_if_exists:
            raise GraphErrorSpecifiedSubgraph(
                'Frozen {} does not exist, but frozens are enabled and the flag "use_frozen_only_if_exists" is not enabled.'
                .format(get_frozen_path()[0]),
                subgraph=graph_descriptor)

    if utils.parse_bool(graph_.get('frozen', 'false')) and self.use_frozens and \
            os.path.isfile(get_frozen_path()[0]):
        logger.info('Loading from frozen')
        with open(get_frozen_path()[0], 'rb') as f:
            return pickle.load(f)

    attrs = copy.deepcopy(graph_.get('attrs', {}))
    attrs = self.__substitute_graph_args(attrs)

    for i, dep_ in enumerate(graph_.get('deps', {})):
        dep = dep_ if isinstance(dep_, dict) else {'graph': dep_}
        dep_descriptor = dep['name'] if isinstance(dep, dict) and 'name' in dep \
            else '{}-th dep (zero-based)'.format(i)

        for key in {'graph'}:
            if key not in dep:
                raise GraphErrorSpecifiedDep(
                    'Dep description must have {} parameter.'.format(key),
                    subgraph=graph_descriptor,
                    dep=dep_descriptor)

        for key in dep.keys():
            dep_possible_params = {
                'fields', 'graph', 'prefix', 'comment', 'name', 'enable_if'
            }
            if key not in dep_possible_params:
                raise GraphErrorSpecifiedDep(
                    'Dep description has param {} that is not allowed. Check for typos. Possible values: {}'
                    .format(key, dep_possible_params),
                    subgraph=graph_descriptor,
                    dep=dep_descriptor)

        if 'enable_if' in dep:
            if not self.eval_expression(dep['enable_if']):
                logger.info(
                    'Skip dependency "{}" of "{}" because the if clause is false'
                    .format(dep_descriptor, graph_descriptor))
                continue

        res = self.__run_graph(
            dep['graph'],
            disable_inmemory_cache=disable_inmemory_cache,
            disable_file_cache=disable_file_cache,
            cleanup=cleanup,
            perfomance_logging=perfomance_logging,
        )

        if not isinstance(res, dict):
            raise GraphErrorSpecifiedDep(
                'You may not use a non-dict-result cube as a dependency. Result data ({}): {}.'
                .format(type(res), res),
                subgraph=graph_descriptor,
                dep=dep_descriptor)

        if 'fields' not in dep:
            for key in res:
                attr_key = dep.get('prefix', '') + key
                if attr_key in attrs:
                    raise GraphErrorSpecifiedDep(
                        'Argument "{}" is not unique.'.format(attr_key),
                        subgraph=graph_descriptor,
                        dep=dep_descriptor)
                attrs[attr_key] = res[key]
        else:
            for new_key, old_key_ in dep['fields'].iteritems():
                attr_key = dep.get('prefix', '') + new_key
                pack_to_dict = None
                if isinstance(old_key_, basestring):
                    old_key = old_key_ if old_key_ != '$' else new_key
                elif isinstance(old_key_, dict):
                    old_key = old_key_['source_field'] if old_key_['source_field'] != '$' else new_key
                    pack_to_dict = old_key_.get('pack_to_dict', None)

                if pack_to_dict is None:
                    if attr_key in attrs:
                        raise GraphErrorSpecifiedDep(
                            'Argument "{}" is not unique.'.format(attr_key),
                            subgraph=graph_descriptor,
                            dep=dep_descriptor)
                    if old_key not in res:
                        raise GraphErrorSpecifiedDep(
                            'Field "{}" was not provided by the dependency. Got: {}'
                            .format(old_key, ', '.join(res.keys())),
                            subgraph=graph_descriptor,
                            dep=dep_descriptor)
                    attrs[attr_key] = res[old_key]
                else:
                    if pack_to_dict not in attrs:
                        attrs[pack_to_dict] = {}
                    if attr_key in attrs[pack_to_dict]:
                        raise GraphErrorSpecifiedDep(
                            'Argument "{}" is not unique for packing dict "{}".'
                            .format(attr_key, pack_to_dict),
                            subgraph=graph_descriptor,
                            dep=dep_descriptor)
                    if old_key not in res:
                        raise GraphErrorSpecifiedDep(
                            'Field "{}" was not provided by the dependency. Got: {}'
                            .format(old_key, ', '.join(res.keys())),
                            subgraph=graph_descriptor,
                            dep=dep_descriptor)
                    attrs[pack_to_dict][attr_key] = res[old_key]

    module = importlib.import_module(graph_['module'])
    logger.debug('Attrs keys: {}'.format(attrs.keys()))
    try:
        cube_init = getattr(module, graph_['class'])(**attrs)
    except Exception as e:
        logging.error(
            'Failed to init cube:\nCube: {cube}\nGraph part: {graph_part}\nAttrs: {attrs}\nError: {error}\nTraceback: {tb}'
            .format(
                cube=graph_['module'],
                graph_part=str(graph_),
                attrs=utils.dict_to_string(attrs, brackets=True),
                error=str(e),
                tb=traceback.format_exc(),
            ))
        raise

    try:
        res = cube_init.get(
            disable_inmemory_cache=disable_inmemory_cache or utils.parse_bool(
                graph_.get('disable_inmemory_cache', 'false')),
            disable_file_cache=disable_file_cache or utils.parse_bool(
                graph_.get('disable_file_cache', 'false')),
            cleanup=cleanup,
            perfomance_logging=perfomance_logging,
        )
    except Exception as e:
        logging.error(
            'Failed to run cube.get():\nCube: {cube}\nGraph part: {graph_part}\nAttrs: {attrs}\nError: {error}\nTraceback: {tb}'
            .format(
                cube=graph_['module'],
                graph_part=str(graph_),
                attrs=utils.dict_to_string(attrs, brackets=True),
                error=str(e),
                tb=traceback.format_exc(),
            ))
        raise

    if utils.parse_bool(graph_.get('frozen', 'false')) and self.create_frozens:
        frozen_path, frozen_path_dir = get_frozen_path()
        if not os.path.isdir(frozen_path_dir):
            os.makedirs(frozen_path_dir)
        with open(frozen_path, 'wb') as f:
            pickle.dump(res, f)
        logger.info('Frozen point created')

    return res
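# Hedged illustration only: a hypothetical graph description that would pass the
# validation in __run_graph above. The module and class names are made up; only
# the allowed keys ('module', 'class', 'attrs', 'deps', 'fields', 'prefix', ...)
# come from the checks shown in the code.

example_graph = {
    'name': 'train_model',
    'module': 'my_project.cubes',        # hypothetical module
    'class': 'TrainModelCube',           # hypothetical cube class
    'attrs': {'epochs': 10},
    'deps': [
        {
            'name': 'dataset',
            'graph': {'module': 'my_project.cubes', 'class': 'LoadDataCube'},
            # map the dependency's 'data' result field into the 'train_data' attribute
            'fields': {'train_data': 'data'},
        },
    ],
}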