def test_ephasor_writer():
    import tempfile

    from ditto.writers.ephasor.write import Writer
    from ditto.models.node import Node
    from ditto.models.line import Line
    from ditto.models.load import Load
    from ditto.models.regulator import Regulator
    from ditto.models.wire import Wire
    from ditto.models.capacitor import Capacitor
    from ditto.models.powertransformer import PowerTransformer
    from ditto.models.winding import Winding
    from ditto.models.phase_winding import PhaseWinding
    from ditto.store import Store
    from ditto.models.base import Unicode
    from ditto.models.power_source import PowerSource
    from ditto.models.feeder_metadata import Feeder_metadata

    m = Store()
    src = PowerSource(
        m,
        name="f1_src",
        phases=[Unicode("A"), Unicode("B"), Unicode("C")],
        nominal_voltage=12470,
        connecting_element="n1",
        is_sourcebus=1,
    )
    meta = Feeder_metadata(
        m, name="f1", nominal_voltage=12470, headnode="f1_src", substation="f1_src"
    )
    node1 = Node(m, name="n1", feeder_name="f1")
    node2 = Node(m, name="n2", feeder_name="f1")
    node3 = Node(m, name="n3", feeder_name="f1")
    wirea = Wire(m, gmr=1.3, X=2, Y=20)
    wiren = Wire(m, gmr=1.2, X=2, Y=20)
    line1 = Line(
        m,
        name="l1",
        wires=[wirea, wiren],
        from_element="n1",
        to_element="n2",
        feeder_name="f1",
    )
    load1 = Load(m, name="load1", p=5400, q=2615.3394, feeder_name="f1")
    phase_winding = PhaseWinding(m, phase=u"A")
    winding1 = Winding(
        m,
        phase_windings=[phase_winding],
        connecting_element="n2",
        connection_type="Y",
        nominal_voltage=12.47,
        rated_power=25,
        resistance=10,
    )
    winding2 = Winding(
        m,
        phase_windings=[phase_winding],
        connecting_element="l1",
        connection_type="Y",
        nominal_voltage=6.16,
        rated_power=25,
        resistance=10,
    )
    transformer1 = PowerTransformer(
        m,
        name="t1",
        from_element="n2",
        to_element="n3",
        windings=[winding1, winding2],
        feeder_name="f1",
    )
    transformer1.reactances.append(6)
    # reg1 = Regulator(m, name='t1_reg', connected_transformer='t1', connected_winding=2, pt_ratio=60, delay=2)
    # cap1 = Capacitor(m, name='cap1', connecting_element='n2', num_phases=3, nominal_voltage=7.2, var=300, connection_type='Y')
    m.set_names()

    t = tempfile.TemporaryDirectory()
    writer = Writer(output_path=t.name, log_path="./")
    writer.write(m)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint", required=True,
                        help="directory with checkpoint to resume training from or use for testing")
    parser.add_argument("--output_file", required=True, help="where to write output")
    args = parser.parse_args()

    model_path = None
    with open(os.path.join(args.checkpoint, "checkpoint")) as f:
        for line in f:
            line = line.strip()
            if line == "":
                continue
            key, _sep, val = line.partition(": ")
            val = val[1:-1]  # remove quotes
            if key == "model_checkpoint_path":
                model_path = val
    if model_path is None:
        raise Exception("failed to find model path")
    checkpoint_file = os.path.join(args.checkpoint, model_path)

    with tempfile.TemporaryDirectory() as tmp_dir:
        cmd = ["python", "-u",
               os.path.join(SCRIPT_DIR, "dump_checkpoints/dump_checkpoint_vars.py"),
               "--model_type", "tensorflow",
               "--output_dir", tmp_dir,
               "--checkpoint_file", checkpoint_file]
        sp.check_call(cmd)

        with open(os.path.join(tmp_dir, "manifest.json")) as f:
            manifest = json.loads(f.read())

        names = []
        for key in manifest.keys():
            if not key.startswith("generator") or "Adam" in key or "_loss" in key or "_train" in key or "_moving_" in key:
                continue
            names.append(key)
        names = sorted(names)

        arrays = []
        for name in names:
            value = manifest[name]
            with open(os.path.join(tmp_dir, value["filename"]), "rb") as f:
                arr = np.frombuffer(f.read(), dtype=np.float32).copy().reshape(value["shape"])
            arrays.append(arr)

    shapes = []
    for name, arr in zip(names, arrays):
        shapes.append(dict(
            name=name,
            shape=arr.shape,
        ))

    flat = np.hstack([arr.reshape(-1) for arr in arrays])

    start = time.time()
    index = log_quantize(flat, mu=255, bins=256).astype(np.float32)
    print("index found in %0.2fs" % (time.time() - start))

    print("quantizing")
    encoded = np.zeros(flat.shape, dtype=np.uint8)
    elem_count = 0
    for i, x in enumerate(flat):
        distances = np.abs(index - x)
        nearest = np.argmin(distances)
        encoded[i] = nearest
        elem_count += 1
        if elem_count % 1000000 == 0:
            print("rate", int(elem_count / (time.time() - start)))

    with open(args.output_file, "wb") as f:
        def write(name, buf):
            print("%s bytes %d" % (name, len(buf)))
            f.write(struct.pack(">L", len(buf)))
            f.write(buf)

        write("shape", json.dumps(shapes).encode("utf8"))
        write("index", index.tobytes())
        write("encoded", encoded.tobytes())
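# Aside (a sketch, not part of the original script): the per-element argmin
# loop above is O(len(flat) * len(index)) in pure Python. Assuming the
# quantization index is sorted ascending (typical for mu-law style bins),
# the same nearest-bin lookup can be done in bulk with numpy alone:
import numpy as np

def encode_nearest(flat, index):
    """Return, for each value in `flat`, the position of the nearest entry
    in the sorted 1-D array `index` (assumed sorted ascending)."""
    pos = np.searchsorted(index, flat)        # insertion points, 0..len(index)
    pos = np.clip(pos, 1, len(index) - 1)     # keep both neighbours in range
    left, right = index[pos - 1], index[pos]
    take_left = np.abs(flat - left) <= np.abs(flat - right)
    return np.where(take_left, pos - 1, pos).astype(np.uint8)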
def __init__(self,
             vae_config,
             model_config,
             batch_size,
             vae_checkpoint_dir_or_path=None,
             model_checkpoint_dir_or_path=None,
             model_var_pattern=None,
             session_target='',
             **sample_kwargs):
    if tf.gfile.IsDirectory(vae_checkpoint_dir_or_path):
        vae_checkpoint_path = tf.train.latest_checkpoint(
            vae_checkpoint_dir_or_path)
    else:
        vae_checkpoint_path = vae_checkpoint_dir_or_path
    if tf.gfile.IsDirectory(model_checkpoint_dir_or_path):
        model_checkpoint_path = tf.train.latest_checkpoint(
            model_checkpoint_dir_or_path)
    else:
        model_checkpoint_path = model_checkpoint_dir_or_path

    self._config = _update_config(model_config, vae_config)
    self._config.data_converter.set_mode('infer')
    self._config.hparams.batch_size = batch_size

    with tf.Graph().as_default():
        model = self._config.model
        model.build(
            self._config.hparams,
            self._config.data_converter.output_depth,
            encoder_train=False,
            decoder_train=False,
        )

        # Input placeholders
        self._temperature = tf.placeholder(tf.float32, shape=())
        if self._config.hparams.z_size:
            self._latent_z_input = tf.placeholder(
                tf.float32,
                shape=[batch_size, self._config.hparams.encoded_z_size])
        else:
            self._latent_z_input = None
        if self._config.data_converter.control_depth > 0:
            self._c_input = tf.placeholder(
                tf.float32,
                shape=[None, self._config.data_converter.control_depth])
        else:
            self._c_input = None
        self._inputs = tf.placeholder(
            tf.float32,
            shape=[batch_size, None, self._config.data_converter.input_depth])
        self._controls = tf.placeholder(
            tf.float32,
            shape=[batch_size, None, self._config.data_converter.control_depth])
        self._inputs_length = tf.placeholder(
            tf.int32,
            shape=[batch_size] + list(self._config.data_converter.length_shape))
        self._max_length = tf.placeholder(tf.int32, shape=())

        # Outputs
        self._outputs, self._decoder_results = model.sample(
            batch_size,
            max_length=self._max_length,
            latent_z=self._latent_z_input,
            c_input=self._c_input,
            temperature=self._temperature,
            **sample_kwargs)

        vae_var_list = []
        model_var_list = []
        for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
            flag = False
            for pattern in model_var_pattern:
                if re.search(pattern, v.name):
                    flag = True
                    model_var_list.append(v)
            if not flag:
                vae_var_list.append(v)

        # Restore vae graph part
        self._sess = tf.Session(target=session_target)
        vae_saver = tf.train.Saver(vae_var_list)
        if os.path.exists(vae_checkpoint_path) and tarfile.is_tarfile(vae_checkpoint_path):
            tf.logging.info('Unbundling vae checkpoint.')
            with tempfile.TemporaryDirectory() as temp_dir:
                tar = tarfile.open(vae_checkpoint_path)
                tar.extractall(temp_dir)
                # Assume only a single checkpoint is in the directory.
                for name in tar.getnames():
                    if name.endswith('.index'):
                        vae_checkpoint_path = os.path.join(temp_dir, name[0:-6])
                        break
                vae_saver.restore(self._sess, vae_checkpoint_path)
        else:
            vae_saver.restore(self._sess, vae_checkpoint_path)

        # Restore model graph part
        model_saver = tf.train.Saver(model_var_list)
        if os.path.exists(model_checkpoint_path) and tarfile.is_tarfile(model_checkpoint_path):
            tf.logging.info('Unbundling model checkpoint.')
            with tempfile.TemporaryDirectory() as temp_dir:
                tar = tarfile.open(model_checkpoint_path)
                tar.extractall(temp_dir)
                # Assume only a single checkpoint is in the directory.
                for name in tar.getnames():
                    if name.endswith('.index'):
                        model_checkpoint_path = os.path.join(temp_dir, name[0:-6])
                        break
                model_saver.restore(self._sess, model_checkpoint_path)
        else:
            model_saver.restore(self._sess, model_checkpoint_path)
def setUp(self):
    self.tmp_dir = tempfile.TemporaryDirectory()
    if self.use_tmp:
        os.environ['CMDSTANPY_MODEL_PATH'] = self.tmp_dir.name
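# Companion sketch (an assumption, not shown in the source): a
# TemporaryDirectory kept on self is only removed when the object is
# finalized, so the matching tearDown would normally clean it up explicitly:
def tearDown(self):
    if self.use_tmp:
        os.environ.pop('CMDSTANPY_MODEL_PATH', None)
    self.tmp_dir.cleanup()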
def doExport(self, locator, obj, fmt="list", marshalHelper=None, numParts=None, **kwargs):
    """Serialize the input object at locator path in specified format.

    The input object is optionally preprocessed by the helper method.

    Args:
        locator (str): target path or URI
        obj (object): data to be serialized
        fmt (str, optional): format for serialization (mmcif, tdd, csv, list). Defaults to "list".
        marshalHelper (method, optional): pre-processor method applied to input data object. Defaults to None.
        numParts (int, optional): serialize the data in parts. Defaults to None. (json and pickle formats)

    Returns:
        bool: True for success or False otherwise
    """
    try:
        ret = False
        localFlag = self.__fileU.isLocal(locator)
        if marshalHelper:
            myObj = marshalHelper(obj, **kwargs)
        else:
            myObj = obj
        #
        if localFlag and numParts and fmt in ["json", "pickle"]:
            localFilePath = self.__fileU.getFilePath(locator)
            ret = self.__ioU.serializeInParts(localFilePath, myObj, numParts, fmt=fmt, **kwargs)
        elif localFlag:
            localFilePath = self.__fileU.getFilePath(locator)
            ret = self.__ioU.serialize(localFilePath, myObj, fmt=fmt, workPath=self.__workPath, **kwargs)
        else:
            with tempfile.TemporaryDirectory(suffix=self.__workDirSuffix, prefix=self.__workDirPrefix, dir=self.__workPath) as tmpDirName:
                # write a local copy then copy to destination -
                localFilePath = os.path.join(self.__workPath, tmpDirName, self.__fileU.getFileName(locator))
                ok1 = self.__ioU.serialize(localFilePath, myObj, fmt=fmt, workPath=self.__workPath, **kwargs)
                ok2 = True
                if ok1:
                    ok2 = self.__fileU.put(localFilePath, locator, **kwargs)
                ret = ok1 and ok2
    except Exception as e:
        logger.exception("Exporting locator %r failing with %s", locator, str(e))
    return ret
def process(self, inputFile1):
    with tempfile.TemporaryDirectory() as tmpdirname:
        return self._process(inputFile1, tmpdirname)
def test_output(capsys):
    with tempfile.TemporaryDirectory() as tmpdir:  # we can't use tmpdir from pytest because of Python 2.7 compatibility
        g.result = ""

        def my_output(s):
            g.result += s + "\n"

        hello = "world"
        y(hello, output=print)
        out, err = capsys.readouterr()
        assert out == "y| hello: 'world'\n"
        assert err == ""

        y(hello, output=sys.stdout)
        out, err = capsys.readouterr()
        assert out == "y| hello: 'world'\n"
        assert err == ""

        y(hello, output="stdout")
        out, err = capsys.readouterr()
        assert out == "y| hello: 'world'\n"
        assert err == ""

        y(hello, output="")
        out, err = capsys.readouterr()
        assert out == ""
        assert err == ""

        y(hello, output="null")
        out, err = capsys.readouterr()
        assert out == ""
        assert err == ""

        y(hello, output=print)
        out, err = capsys.readouterr()
        assert out == "y| hello: 'world'\n"
        assert err == ""

        if True:
            path = Path(tmpdir) / "x0"
            y(hello, output=path)
            out, err = capsys.readouterr()
            assert out == ""
            assert err == ""
            with path.open("r") as f:
                assert f.read() == "y| hello: 'world'\n"

            path = Path(tmpdir) / "x1"
            y(hello, output=path)
            out, err = capsys.readouterr()
            assert out == ""
            assert err == ""
            with path.open("r") as f:
                assert f.read() == "y| hello: 'world'\n"

        path = Path(tmpdir) / "x2"
        with path.open("a+") as f:
            y(hello, output=f)
        with pytest.raises(TypeError):  # closed file
            y(hello, output=f)
        out, err = capsys.readouterr()
        assert out == ""
        assert err == ""
        with path.open("r") as f:
            assert f.read() == "y| hello: 'world'\n"

        with pytest.raises(TypeError):
            y(hello, output=1)

        y(hello, output=my_output)
        y(1, output=my_output)
        out, err = capsys.readouterr()
        assert out == ""
        assert err == ""
        assert g.result == "y| hello: 'world'\ny| 1\n"


def test_serialize(capsys):
    def serialize(s):
        return repr(s) + " [len=" + str(len(s)) + "]"

    hello = "world"
    y(hello, serialize=serialize)
    out, err = capsys.readouterr()
    assert err == "y| hello: 'world' [len=5]\n"


def test_show_time(capsys):
    hello = "world"
    y(hello, show_time=True)
    out, err = capsys.readouterr()
    assert err.endswith("hello: 'world'\n")
    assert "@ " in err


def test_show_delta(capsys):
    hello = "world"
    y(hello, show_delta=True)
    out, err = capsys.readouterr()
    assert err.endswith("hello: 'world'\n")
    assert "delta=" in err


def test_as_str(capsys):
    hello = "world"
    s = y(hello, as_str=True)
    y(hello)
    out, err = capsys.readouterr()
    assert err == s

    with pytest.raises(TypeError):
        @y(as_str=True)
        def add2(x):
            return x + 2

    with pytest.raises(TypeError):
        with y(as_str=True):
            pass
def run(args):
    # setup logging
    log, _ = job_utils.setup_logging('', {})

    # open the model file
    if os.path.exists(args.model):
        model_file = open(args.model, 'rb')
    else:
        if download_utils.is_url(args.model):
            model_url = args.model
        else:
            model_url = download_utils.url_for_file(args.model + '.mm-model',
                                                    args.urls_file, 'models')
        model_file = download_utils.open_url_cached(model_url, 'rb',
                                                    args.force_download)

    # load the model
    with job_utils.log_step('loading model'):
        model_data = joblib.load(model_file)
        if model_data['sklearn_version'] != sklearn.__version__:
            log.warning('the version of scikit-learn installed now is '
                        'different from the one used during training '
                        '(%s vs %s), you may experience issues',
                        model_data['sklearn_version'], sklearn.__version__)

    # compute CGRs for inputs
    with job_utils.log_step('computing input CGRs'):
        with tempfile.TemporaryDirectory() as temp_dir:
            cgrs_file = os.path.join(temp_dir, 'cgrs.mm-repr')
            options = dict(model_data['generation_options'],
                           fasta_output_dir=args.files,
                           output_file=cgrs_file,
                           disable_avx=args.disable_avx)
            backend.run_backend_kmers(options, {})

            cgrs = []
            reader = kameris_formats.repr_reader(cgrs_file)
            for i in range(reader.count):
                cgrs.append(reader.read_matrix(i, flatten=True))
            reader.file.close()

    # get list of input files
    filenames = sorted(f for f in os.listdir(args.files)
                       if os.path.isfile(os.path.join(args.files, f)))

    # run predictions
    with job_utils.log_step('running predictions'):
        predictor = model_data['predictor']
        if hasattr(predictor, 'predict_proba'):
            results = predictor.predict_proba(cgrs)
        else:
            results = predictor.predict(cgrs)

    # build and write results
    if hasattr(predictor, 'predict_proba'):
        results = dict(zip(filenames, [
            sorted(zip(predictor.classes_, result), reverse=True,
                   key=lambda r: r[1])
            for result in results
        ]))
    else:
        results = dict(zip(filenames, results))
    with open('results.json', 'w') as file:
        json.dump(results, file)
    log.info('wrote results to results.json')

    # print results
    print()
    print('Top-1 prediction summary:')
    print(tabulate(zip(
        filenames,
        ['{} ({:.1%})'.format(*results[f][0]) if isinstance(results[f], list)
         else results[f]
         for f in filenames]
    )))
def get_unique_temp_dir(self):
    return tempfile.TemporaryDirectory()
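# Usage sketch (an assumption, not from the source): TemporaryDirectory
# removes its directory once the object is finalized, so callers must hold
# the returned handle for as long as they use the path. `obj` stands in for
# a hypothetical instance of the class defining get_unique_temp_dir():
def use_unique_temp_dir(obj):
    tmp = obj.get_unique_temp_dir()
    path = tmp.name          # valid for as long as `tmp` is referenced
    # ... work inside `path` ...
    tmp.cleanup()            # explicit, deterministic removal when done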
def test_writes(self):
    #===================================
    # Defining sub functions
    #===================================
    def _oned_int(c):
        return range(c)

    def _oned_str(c):
        return [str(i).encode('ascii') for i in range(c)]

    def _twooned_int(c):
        return [[i] for i in range(c)]

    def _twooned_str(c):
        return [[str(i).encode('ascii')] for i in range(c)]

    def _twotwod_int(c):
        return [[i, i] for i in range(c)]

    def _twotwod_str(c):
        return [[str(i).encode('ascii'), b"hello"] for i in range(c)]

    #def _twotwod_U(c):
    #    return [[str(i).encode('UTF-8'), u"hello"] for i in range(c)]

    def _none(c):
        return None

    def _zero(c):
        return np.empty([c, 0], dtype='S')

    #===================================
    # Starting main function
    #===================================
    logging.info("starting 'test_writes'")
    np.random.seed(0)
    temp_dir = tempfile.TemporaryDirectory("pstreader")
    output_template = temp_dir.name + '/writes.{0}.{1}'
    i = 0
    for row_count in [5, 2, 1, 0]:
        for col_count in [4, 2, 1, 0]:
            for val_shape in [3, None, 1]:
                val = (np.random.normal(.5, 2, size=(row_count, col_count))
                       if val_shape is None
                       else np.random.normal(.5, 2, size=(row_count, col_count, val_shape)))
                for row_or_col_gen in [_oned_int, _oned_str, _twooned_int,
                                       _twooned_str, _twotwod_int, _twotwod_str]:  #!!! _twotwod_U can't round-trip Unicode in hdf5
                    row = row_or_col_gen(row_count)
                    col = row_or_col_gen(col_count)
                    for prop_gen in [_none, _oned_str, _oned_int, _twooned_int,
                                     _twooned_str, _twotwod_int, _twotwod_str,
                                     _zero]:  #!!! _twotwod_U can't round-trip Unicode because Hdf5 doesn't like it.
                        row_prop = prop_gen(row_count)
                        col_prop = prop_gen(col_count)
                        pstdata = PstData(row, col, val, row_prop, col_prop, str(i))
                        for the_class, suffix in [(PstMemMap, "memmap"),
                                                  (PstHdf5, "hdf5"),
                                                  (PstNpz, "npz")]:
                            filename = output_template.format(i, suffix)
                            logging.info(filename)
                            i += 1
                            the_class.write(filename, pstdata)
                            reader = (the_class(filename) if suffix != 'hdf5'
                                      else the_class(filename, block_size=3))
                            _fortesting_JustCheckExists().input(reader)
                            for subsetter in [None, np.s_[::2, ::3]]:
                                subreader = (reader if subsetter is None
                                             else reader[subsetter[0], subsetter[1]])
                                expected = (pstdata if subsetter is None
                                            else pstdata[subsetter[0], subsetter[1]].read())
                                for order in ['C', 'F', 'A']:
                                    for force_python_only in [True, False]:
                                        readdata = subreader.read(order=order,
                                                                  force_python_only=force_python_only)
                                        assert np.array_equal(readdata.val, expected.val)
                                        assert np.array_equal(readdata.row, expected.row)
                                        assert np.array_equal(readdata.col, expected.col)
                                        assert (np.array_equal(readdata.row_property, expected.row_property)
                                                or (readdata.row_property.shape[1] == 0
                                                    and expected.row_property.shape[1] == 0))
                                        assert (np.array_equal(readdata.col_property, expected.col_property)
                                                or (readdata.col_property.shape[1] == 0
                                                    and expected.col_property.shape[1] == 0))
                            if suffix in {'memmap', 'hdf5'}:
                                reader.flush()
                            os.remove(filename)
    temp_dir.cleanup()
    logging.info("done with 'test_writes'")
def __init__(self,
             config,
             batch_size,
             checkpoint_dir_or_path=None,
             var_name_substitutions=None,
             session_target='',
             **sample_kwargs):
    if tf.gfile.IsDirectory(checkpoint_dir_or_path):
        checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir_or_path)
    else:
        checkpoint_path = checkpoint_dir_or_path

    self._config = copy.deepcopy(config)
    self._config.data_converter.set_mode('infer')
    self._config.hparams.batch_size = batch_size

    with tf.Graph().as_default():
        model = self._config.model
        model.build(self._config.hparams,
                    self._config.data_converter.output_depth,
                    is_training=False)
        self.ae = model.ae
        self.shared_z = model.shared_z

        # Force all layers to be created.
        # model._compute_model_loss(
        #     input_sequence=np.random.randint(low=0, high=2, size=(2, 32, 90)).astype(np.bool),
        #     output_sequence=np.random.randint(low=0, high=2, size=(2, 32, 90)).astype(np.bool),
        #     sequence_length=np.array([32, 32]).astype(np.int32),
        #     control_sequence=None,
        #     image_input=np.random.normal(size=(2, 64, 64, 3)).astype(np.float32)
        # )
        # dist, *shapes = self.ae.encode_var_new(np.random.normal(size=(2, 64, 64, 3)).astype(np.float32))
        # self.ae.decode_var_new(dist.sample(), *shapes)
        # img = np.random.normal(size=(1, 240, 320, 3)).astype(np.float32)
        # data = tf.data.Dataset.from_tensors(img).make_one_shot_iterator().get_next()
        # self.vae.encode(data, config.hparams)
        # Actually create the model and add it to computation graph
        # ^ This is incredibly hacky and should be replaced with a better solution.

        # Input placeholders
        self._temperature = tf.placeholder(tf.float32, shape=())
        if self._config.hparams.z_size:
            self._z_input = tf.placeholder(
                tf.float32, shape=[batch_size, self._config.hparams.z_size])
        else:
            self._z_input = None
        if self._config.data_converter.control_depth > 0:
            self._c_input = tf.placeholder(
                tf.float32, shape=[None, self._config.data_converter.control_depth])
        else:
            self._c_input = None
        self._inputs = tf.placeholder(
            tf.float32,
            shape=[batch_size, None, self._config.data_converter.input_depth])
        self._controls = tf.placeholder(
            tf.float32,
            shape=[batch_size, None, self._config.data_converter.control_depth])
        self._inputs_length = tf.placeholder(
            tf.int32,
            shape=[batch_size] + list(self._config.data_converter.length_shape))
        self._max_length = tf.placeholder(tf.int32, shape=())

        # Outputs
        self._outputs, self._decoder_results = model.sample(
            batch_size,
            max_length=self._max_length,
            z=self._z_input,
            c_input=self._c_input,
            temperature=self._temperature,
            **sample_kwargs)
        if self._config.hparams.z_size:
            q_z = model.encode(self._inputs, self._inputs_length, self._controls)
            self._mu = q_z.loc
            self._sigma = q_z.scale.diag
            self._z = q_z.sample()

        self.image_input = tf.placeholder(tf.float32, shape=(None, 64, 64, 3))
        dist, *shapes = self.ae.encode_var_new(self.image_input)
        self.image_z = self.shared_z(dist.sample())

        self.latent_input = tf.placeholder(tf.float32, shape=(None, 256))
        self.image_recons = self.ae.decode_var_new(self.latent_input, *shapes)
        self.pass_thr_shared = self.shared_z(self.latent_input)

        var_map = None
        if var_name_substitutions is not None:
            var_map = {}
            for v in tf.global_variables():
                var_name = v.name[:-2]  # Strip ':0' suffix.
                for pattern, substitution in var_name_substitutions:
                    var_name = re.sub(pattern, substitution, var_name)
                if var_name != v.name[:-2]:
                    tf.logging.info('Renaming `%s` to `%s`.', v.name[:-2], var_name)
                var_map[var_name] = v

        # Restore graph
        self._sess = tf.Session(target=session_target)
        saver = tf.train.Saver(var_map)
        if os.path.exists(checkpoint_path) and tarfile.is_tarfile(checkpoint_path):
            tf.logging.info('Unbundling checkpoint.')
            with tempfile.TemporaryDirectory() as temp_dir:
                tar = tarfile.open(checkpoint_path)
                tar.extractall(temp_dir)
                # Assume only a single checkpoint is in the directory.
                for name in tar.getnames():
                    if name.endswith('.index'):
                        checkpoint_path = os.path.join(temp_dir, name[0:-6])
                        break
                saver.restore(self._sess, checkpoint_path)
        else:
            saver.restore(self._sess, checkpoint_path)
def modify_font(f_otf, f_otf2, table, clean=True, is_kern=False, working_dir='.'):
    # Note: the original mixed Python 2 syntax (print statements, unichr,
    # iteritems) with the Python 3-only tempfile.TemporaryDirectory; it is
    # written in Python 3 throughout here.
    print(table)

    font = ttLib.TTFont(f_otf)

    charmaps = {}
    for key, vals in font['cmap'].buildReversed().items():
        for val in vals:
            charmaps[chr(val)] = key

    # nonbreakingspace is not always the same thing
    try:
        nbsp_name = charmaps[chr(160)]
    except KeyError:
        charmaps[chr(160)] = charmaps[' ']
        nbsp_name = charmaps[chr(160)]
        #print(' ' in charmaps)
        #raise KeyError("Can't find a non-breaking space in the font!")

    # Add the space proxy to the table
    for key, val in table.items():
        if val == ' ':
            table[key] = nbsp_name

    org_dir = os.getcwd()
    with tempfile.TemporaryDirectory() as temp_dir:
        os.chdir(temp_dir)
        f_xml = 'font_info.ttx'
        f_xml2 = 'modified_font_info.ttx'
        cmd = 'ttx -q -o {} {}'.format(f_xml, os.path.join(org_dir, f_otf))
        subprocess.call(cmd, shell=True)

        with open(f_xml, 'rb') as FIN:
            soup = bs4.BeautifulSoup(FIN.read(), 'xml')
        salad = copy(soup)

        salad_mtx = salad.find('hmtx')
        soup_mtx = soup.find('hmtx')
        salad_CFF = salad.find('CFF')
        soup_CFF = soup.find('CFF')
        salad_kern = salad.find('kern')
        soup_kern = soup.find('kern')

        # Get the proper names for the table
        ptable = {}
        for key, val in table.items():
            if key == val:
                continue
            if val == chr(160):
                val = nbsp_name

            mtx_key = salad_mtx.find('mtx', {"name": key})
            mtx_val = soup_mtx.find('mtx', {"name": val})

            # If keys are missing, need to use value from charmap
            if mtx_key is None:
                key = charmaps[key]
                mtx_key = salad_mtx.find('mtx', {"name": key})
            if mtx_val is None:
                val = charmaps[val]
                mtx_val = salad_mtx.find('mtx', {"name": val})

            # If keys are still missing, we don't know what the eff this is
            if mtx_key is None:
                raise KeyError("font missing character '%s'" % key)
            if mtx_val is None:
                raise KeyError("font missing character '%s'" % val)

            # Now we know the name
            ptable[key] = val

        # Swap the values
        for key, val in ptable.items():
            mtx_key = salad_mtx.find('mtx', {"name": key})
            mtx_val = soup_mtx.find('mtx', {"name": val})

            # Swap the correct width, lsb
            mtx_key['width'] = int(mtx_val['width'])
            mtx_key['lsb'] = int(mtx_val['lsb'])
            #if key == nbsp_name:
            #    mtx_key['width'] = int(mtx_key['width']*1.15)

            # Swap the CharString
            contents = soup_CFF.find('CharString', {"name": val}).text
            salad_CFF.find('CharString', {"name": key}).string = contents

        if soup_kern is None or not is_kern:
            soup_kern = bs4.BeautifulSoup("", 'lxml')

        for pair in soup_kern.find_all('pair'):
            L, R = pair['l'], pair['r']
            is_L = L in ptable.values()
            is_R = R in ptable.values()
            args = {'l': L, 'r': R}
            if is_L and not is_R:
                salad_kern.find('pair', args)['l'] = L
                print("Kerning", pair['l'])
            elif is_R and not is_L:
                salad_kern.find('pair', args)['r'] = R
            elif is_L and is_R:  # original tested `is_R and is_R`
                salad_kern.find('pair', args)['l'] = L
                salad_kern.find('pair', args)['r'] = R

        if clean:
            clean_font(salad, table, charmaps)

        with open(f_xml2, 'wb') as FOUT:
            FOUT.write(salad.prettify('utf-8'))

        cmd = 'ttx -q --recalc-timestamp -b {}'.format(f_xml2)
        subprocess.call(cmd, shell=True)
        f_out = f_xml2.replace('.ttx', '.otf')

        f_final_save = os.path.join(org_dir, working_dir, f_otf2)

        # Try to create the target directory if it doesn't exist
        try:
            os.makedirs(os.path.dirname(f_final_save))
        except OSError:
            pass

        shutil.move(f_out, f_final_save)

    os.chdir(org_dir)
def _zip_package(package_root, includes, excludes=None, dockerize_pip=False,
                 follow_symlinks=False, python_path=None,
                 requirements_files=None, use_pipenv=False, **kwargs):
    """Create zip file in memory with package dependencies.

    Args:
        package_root (str): Base directory to copy files from.
        includes (List[str]): Inclusion patterns. Only files matching those
            patterns will be included in the result.
        excludes (List[str]): Exclusion patterns. Files matching those
            patterns will be excluded from the result. Exclusions take
            precedence over inclusions.
        dockerize_pip (Union[bool, str]): Whether to use docker or under what
            conditions docker will be used to run ``pip``.
        follow_symlinks (bool): If true, symlinks will be included in the
            resulting zip file.
        python_path (Optional[str]): Explicit python interpreter to be used.
            pipenv must be installed and executable using ``-m`` if provided.
        requirements_files (Dict[str, bool]): Map of requirement file names
            and whether they exist.
        use_pipenv (bool): Whether to use pipenv to export a Pipfile as
            requirements.txt.
        kwargs (Any): Advanced options for subprocess and docker. See source
            code to determine what is supported.

    Returns:
        Tuple[str, str]: Content of the ZIP file as a byte string and
        calculated hash of all the files.

    """
    kwargs.setdefault('pipenv_timeout', 300)

    temp_root = os.path.join(os.path.expanduser('~'), '.runway_cache')
    if not os.path.isdir(temp_root):
        os.makedirs(temp_root)

    # exclude potential virtual environments in the package
    excludes.append('.venv/')

    with tempfile.TemporaryDirectory(prefix='cfngin', dir=temp_root) as tmpdir:
        tmp_req = os.path.join(tmpdir, 'requirements.txt')
        copydir(package_root, tmpdir, includes, excludes, follow_symlinks)
        tmp_req = handle_requirements(package_root=package_root,
                                      dest_path=tmpdir,
                                      requirements=requirements_files,
                                      python_path=python_path,
                                      use_pipenv=use_pipenv,
                                      pipenv_timeout=kwargs['pipenv_timeout'])

        if should_use_docker(dockerize_pip):
            dockerized_pip(tmpdir, **kwargs)
        else:
            pip_cmd = [python_path or sys.executable, '-m', 'pip', 'install',
                       '-t', tmpdir, '-r', tmp_req]
            pip_proc = subprocess.Popen(pip_cmd, cwd=tmpdir,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)
            if int(sys.version[0]) > 2:
                _stdout, stderr = pip_proc.communicate(
                    timeout=kwargs.get('pipenv_timeout', 900))
            else:
                _stdout, stderr = pip_proc.communicate()
            if pip_proc.returncode != 0:
                if int(sys.version[0]) > 2:
                    stderr = stderr.decode('UTF-8')
                LOGGER.error('"%s" failed with the following output:\n%s',
                             ' '.join(pip_cmd), stderr)
                raise PipError

        req_files = _find_files(tmpdir, includes='**', follow_symlinks=False)
        return _zip_files(req_files, tmpdir)
def _zip_package(package_root, includes, excludes=None, dockerize_pip=False,
                 follow_symlinks=False, python_path=None,
                 requirements_files=None, use_pipenv=False, **kwargs):
    """Create zip file in memory with package dependencies.

    Args:
        package_root (str): Base directory to copy files from.
        includes (List[str]): Inclusion patterns. Only files matching those
            patterns will be included in the result.
        excludes (List[str]): Exclusion patterns. Files matching those
            patterns will be excluded from the result. Exclusions take
            precedence over inclusions.
        dockerize_pip (Union[bool, str]): Whether to use docker or under what
            conditions docker will be used to run ``pip``.
        follow_symlinks (bool): If true, symlinks will be included in the
            resulting zip file.
        python_path (Optional[str]): Explicit python interpreter to be used.
            pipenv must be installed and executable using ``-m`` if provided.
        requirements_files (Dict[str, bool]): Map of requirement file names
            and whether they exist.
        use_pipenv (bool): Whether to use pipenv to export a Pipfile as
            requirements.txt.
        kwargs (Any): Advanced options for subprocess and docker. See source
            code to determine what is supported.

    Returns:
        Tuple[str, str]: Content of the ZIP file as a byte string and
        calculated hash of all the files.

    """
    kwargs.setdefault('pipenv_timeout', 300)

    temp_root = os.path.join(os.path.expanduser('~'), '.runway_cache')
    if not os.path.isdir(temp_root):
        os.makedirs(temp_root)

    # exclude potential virtual environments in the package
    excludes.append('.venv/')

    with tempfile.TemporaryDirectory(prefix='cfngin', dir=temp_root) as tmpdir:
        tmp_req = os.path.join(tmpdir, 'requirements.txt')
        copydir(package_root, tmpdir, includes, excludes, follow_symlinks)
        tmp_req = handle_requirements(package_root=package_root,
                                      dest_path=tmpdir,
                                      requirements=requirements_files,
                                      python_path=python_path,
                                      use_pipenv=use_pipenv,
                                      pipenv_timeout=kwargs['pipenv_timeout'])

        if should_use_docker(dockerize_pip):
            dockerized_pip(tmpdir, **kwargs)
        else:
            tmp_script = Path(tmpdir) / '__runway_run_pip_install.py'
            pip_cmd = [python_path or sys.executable, '-m', 'pip', 'install',
                       '--target', tmpdir, '--requirement', tmp_req]

            # Pyinstaller build or explicit python path
            if getattr(sys, 'frozen', False) and not python_path:
                script_contents = os.linesep.join([
                    'import runpy',
                    'from runway.util import argv',
                    'with argv(*{}):'.format(json.dumps(pip_cmd[2:])),
                    '    runpy.run_module("pip", run_name="__main__")\n'])
                # TODO remove python 2 logic when dropping python 2
                tmp_script.write_text(script_contents
                                      if sys.version_info.major > 2
                                      else script_contents.decode('UTF-8'))
                cmd = [sys.executable, 'run-python', str(tmp_script)]
            else:
                cmd = pip_cmd

            try:
                subprocess.check_call(cmd)
            except subprocess.CalledProcessError:
                raise PipError
            finally:
                if tmp_script.is_file():
                    tmp_script.unlink()

        req_files = _find_files(tmpdir, includes='**', follow_symlinks=False)
        return _zip_files(req_files, tmpdir)
def test_read_json2():
    with tempfile.TemporaryDirectory() as tmpdir:
        json_filename = Path(tmpdir) / "ycecream.json"
        with open(str(json_filename), "w") as f:
            print('{"prefix": "xxx", "delta": 10}', file=f)
        sys.path = [tmpdir] + sys.path
        ycecream.set_defaults()
        ycecream.apply_json()
        sys.path.pop(0)
        y1 = y.new()
        s = y1(3, as_str=True)
        assert s == "xxx3\n"
        assert 10 < y1.delta < 11

        with open(str(json_filename), "w") as f:
            print('{"prefix1": "xxx"}', file=f)
        sys.path = [tmpdir] + sys.path
        with pytest.raises(ValueError):
            ycecream.set_defaults()
            ycecream.apply_json()
        sys.path.pop(0)

        with open(str(json_filename), "w") as f:
            print('{"serialize": "xxx"}', file=f)
        sys.path = [tmpdir] + sys.path
        with pytest.raises(ValueError):
            ycecream.set_defaults()
            ycecream.apply_json()
        sys.path.pop(0)

        tmpdir = Path(tmpdir) / "ycecream"
        tmpdir.mkdir()
        json_filename = Path(tmpdir) / "ycecream.json"
        with open(str(json_filename), "w") as f:
            print('{"prefix": "yyy"}', file=f)
        sys.path = [str(tmpdir)] + sys.path
        ycecream.set_defaults()
        ycecream.apply_json()
        sys.path.pop(0)
        y1 = y.new()
        s = y1(3, as_str=True)
        assert s == "yyy3\n"

        tmpdir = Path(tmpdir) / "ycecream"
        tmpdir.mkdir()
        json_filename = Path(tmpdir) / "ycecream.json"
        with open(str(json_filename), "w") as f:
            print("{}", file=f)
        sys.path = [str(tmpdir)] + sys.path
        ycecream.set_defaults()
        ycecream.apply_json()
        sys.path.pop(0)
def dist_learn(env,
               q_dist_func,
               num_atoms=51,
               V_max=10,
               lr=25e-5,
               max_timesteps=100000,
               buffer_size=50000,
               exploration_fraction=0.05,
               exploration_final_eps=0.01,
               train_freq=1,
               batch_size=32,
               print_freq=1,
               checkpoint_freq=2000,
               learning_starts=1000,
               gamma=1.0,
               target_network_update_freq=500,
               prioritized_replay=False,
               prioritized_replay_alpha=0.6,
               prioritized_replay_beta0=0.4,
               prioritized_replay_beta_iters=None,
               prioritized_replay_eps=1e-6,
               num_cpu=1,
               callback=None):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    q_dist_func: (tf.Variable, int, str, bool) -> tf.Variable
        the model that takes the following inputs:
            observation_in: object
                the output of observation placeholder
            num_actions: int
                number of actions
            scope: str
            reuse: bool
                should be passed to outer variable scope
        and returns a tensor of shape (batch_size, num_actions) with values of every action.
    lr: float
        learning rate for adam optimizer
    max_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: True
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to max_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    num_cpu: int
        number of cpus to use for training
    callback: (locals, globals) -> None
        function called at every step with state of the algorithm.
        If callback returns true training stops.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model
    sess = U.single_threaded_session()
    sess.__enter__()

    def make_obs_ph(name):
        print(name)
        return U.BatchInput(env.observation_space.shape, name=name)

    act, train, update_target, debug = build_dist_train(
        make_obs_ph=make_obs_ph,
        dist_func=q_dist_func,
        num_actions=env.action_space.n,
        num_atoms=num_atoms,
        V_max=V_max,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr, epsilon=0.01 / batch_size),
        gamma=gamma,
        grad_norm_clipping=10
    )
    print("===================================")
    print("learning rate: {}, epsilon adam: {}".format(lr, 0.01 / batch_size))
    print("===================================")
    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_dist_func': q_dist_func,
        'num_actions': env.action_space.n,
    }

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = max_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    with tempfile.TemporaryDirectory() as td:
        model_saved = False
        model_file = os.path.join(td, "model")
        print(model_file)
        # mkdir_p(os.path.dirname(model_file))
        for t in range(max_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            action = act(np.array(obs)[None], update_eps=exploration.value(t))[0]
            new_obs, rew, done, _ = env.step(action)
            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                episode_rewards.append(0.0)

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                print("steps : {}".format(t))
                print("episodes : {}".format(num_episodes))
                print("mean 100 episode reward: {}".format(mean_100ep_reward))
                # logger.record_tabular("steps", t)
                # logger.record_tabular("episodes", num_episodes)
                # logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                # logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                # logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and t % checkpoint_freq == 0):
                print("==========================")
                print("Error: {}".format(td_errors))
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        print("Saving model due to mean reward increase: {} -> {}".format(
                            saved_mean_reward, mean_100ep_reward))
                        # logger.log("Saving model due to mean reward increase: {} -> {}".format(
                        #     saved_mean_reward, mean_100ep_reward))
                    U.save_state(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
        if model_saved:
            if print_freq is not None:
                print("Restored model with mean reward: {}".format(saved_mean_reward))
                # logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            U.load_state(model_file)
    return ActWrapper(act, act_params)
def test_check_output(capsys):
    """ special Pythonista code, as that does not reload x1 and x2 """
    if "x1" in sys.modules:
        del sys.modules["x1"]
    if "x2" in sys.modules:
        del sys.modules["x2"]
    del sys.modules["ycecream"]
    from ycecream import y
    """ end of special Pythonista code """
    with y.preserve():
        with tempfile.TemporaryDirectory() as tmpdir:
            x1_file = Path(tmpdir) / "x1.py"
            with open(str(x1_file), "w") as f:
                print(
                    """\
def check_output():
    from ycecream import y
    import x2

    y.configure(show_line_number=True, show_exit=False)
    x2.test()
    y(1)
    y(
        1
    )
    with y(prefix="==>"):
        y()

    with y(
        prefix="==>"
    ):




        y()

    @y
    def x(a, b=1):
        pass

    x(2)




    @y()
    def x(
    ):
        pass

    x()
""",
                    file=f,
                )
            x2_file = Path(tmpdir) / "x2.py"
            with open(str(x2_file), "w") as f:
                print(
                    """\
from ycecream import y

def test():
    @y()
    def myself(x):
        y(x)
        return x

    myself(6)
    with y():
        pass
""",
                    file=f,
                )
            sys.path = [tmpdir] + sys.path
            import x1

            x1.check_output()
            sys.path.pop(0)
            out, err = capsys.readouterr()
            assert (
                err
                == """\
y| #5[x2.py] in test() ==> called myself(6)
y| #6[x2.py] in myself() ==> x: 6
y| #10[x2.py] in test() ==> enter
y| #7[x1.py] in check_output() ==> 1
y| #8[x1.py] in check_output() ==> 1
==>#11[x1.py] in check_output() ==> enter
y| #12[x1.py] in check_output()
==>#14[x1.py] in check_output() ==> enter
y| #21[x1.py] in check_output()
y| #24[x1.py] in check_output() ==> called x(2)
y| #33[x1.py] in check_output() ==> called x()
"""
            )
def doImport(self, locator, fmt="list", marshalHelper=None, numParts=None, **kwargs):
    """Deserialize data at the target locator in specified format.

    The deserialized data is optionally post-processed by the input helper method.

    Args:
        locator (str): path or URI to input data
        fmt (str, optional): format for deserialization (mmcif, tdd, csv, list). Defaults to "list".
        marshalHelper (method, optional): post-processor method applied to deserialized data object. Defaults to None.
        numParts (int, optional): deserialize the data in parts. Defaults to None. (json and pickle formats)
        tarMember (str, optional): name of a member of tar file bundle. Defaults to None. (tar file format)

    Returns:
        Any: format specific return type
    """
    try:
        tarMember = kwargs.get("tarMember", None)
        localFlag = self.__fileU.isLocal(locator) and not tarMember
        #
        if localFlag and numParts and fmt in ["json", "pickle"]:
            filePath = self.__fileU.getFilePath(locator)
            ret = self.__ioU.deserializeInParts(filePath, numParts, fmt=fmt, **kwargs)
        elif localFlag:
            filePath = self.__fileU.getFilePath(locator)
            ret = self.__ioU.deserialize(filePath, fmt=fmt, workPath=self.__workPath, **kwargs)
        else:
            #
            if fmt == "mmcif":
                ret = self.__ioU.deserialize(locator, fmt=fmt, workPath=self.__workPath, **kwargs)
            else:
                with tempfile.TemporaryDirectory(suffix=self.__workDirSuffix, prefix=self.__workDirPrefix, dir=self.__workPath) as tmpDirName:
                    #
                    # Fetch first then read a local copy -
                    #
                    if tarMember:
                        localFilePath = os.path.join(self.__workPath, tmpDirName, tarMember)
                    else:
                        localFilePath = os.path.join(self.__workPath, tmpDirName, self.__fileU.getFileName(locator))
                    # --- Local copy approach ---
                    self.__fileU.get(locator, localFilePath, **kwargs)
                    ret = self.__ioU.deserialize(localFilePath, fmt=fmt, workPath=self.__workPath, **kwargs)
        if marshalHelper:
            ret = marshalHelper(ret, **kwargs)
    except Exception as e:
        logger.exception("Importing locator %r failing with %s", locator, str(e))
        ret = None
    return ret
def learn(env,
          p_dist_func,
          lr=5e-4,
          eps=0.0003125,
          max_timesteps=100000,
          buffer_size=50000,
          exp_t1=1e6,
          exp_p1=0.1,
          exp_t2=25e6,
          exp_p2=0.01,
          # exploration_fraction=0.1,
          # exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=1,
          checkpoint_freq=10000,
          learning_starts=1000,
          gamma=0.95,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          num_cpu=16,
          param_noise=False,
          callback=None,
          dist_params=None,
          n_action=None,
          action_map=None):
    """Train a distdeepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    p_dist_func: (tf.Variable, int, str, bool) -> tf.Variable
        the model that takes the following inputs:
            observation_in: object
                the output of observation placeholder
            num_actions: int
                number of actions
            scope: str
            reuse: bool
                should be passed to outer variable scope
        and returns a tensor of shape (batch_size, num_actions) with values of every action.
    lr: float
        learning rate for adam optimizer
    max_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: True
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to max_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    num_cpu: int
        number of cpus to use for training
    callback: (locals, globals) -> None
        function called at every step with state of the algorithm.
        If callback returns true training stops.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/distdeepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model
    sess = make_session(num_cpu=num_cpu)
    sess.__enter__()
    logger.configure(dir=os.path.join(
        '.', datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f")))
    # logger.configure()

    def make_obs_ph(name):
        return ObservationInput(env.observation_space, name=name)

    if dist_params is None:
        raise ValueError('dist_params is required')
    # z, dz = build_z(**dist_params)

    act, train, update_target, debug = distdeepq_mog.build_train(
        make_obs_ph=make_obs_ph,
        p_dist_func=p_dist_func,
        # num_actions=env.action_space.n,
        n_action=n_action,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr, epsilon=eps),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise,
        dist_params=dist_params)

    act_params = {
        'make_obs_ph': make_obs_ph,
        'p_dist_func': p_dist_func,
        'num_actions': n_action,
        'dist_params': dist_params
    }

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = max_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    # exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
    #                              initial_p=1.0,
    #                              final_p=exploration_final_eps)
    # exploration = PiecewiseSchedule([(0, 1.0), (max_timesteps / 25, 0.1),
    #                                  (max_timesteps, 0.01)], outside_value=0.01)
    exploration = PiecewiseSchedule([(0, 1.0), (exp_t1, exp_p1), (exp_t2, exp_p2)],
                                    outside_value=exp_p2)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    avg_success_list = deque(maxlen=100)
    avg_collision_list = deque(maxlen=100)
    avg_derail_list = deque(maxlen=100)
    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True
    with tempfile.TemporaryDirectory() as td:
        model_saved = False
        model_file = os.path.join(td, "model")
        for t in range(max_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(
                    1. - exploration.value(t) +
                    exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
            action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
            reset = False
            action_val = action_map[action]
            new_obs, rew, done, info = env.step(action_val)
            # env.render()
            # rew = rew-1 for proposed loss with new metric
            # rew = rew-1
            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                episode_rewards.append(0.0)
                if info == 1:
                    avg_success_list.append(1.0)
                    avg_collision_list.append(0.0)
                    avg_derail_list.append(0.0)
                elif info == -1:
                    avg_success_list.append(0.0)
                    avg_collision_list.append(1.0)
                    avg_derail_list.append(0.0)
                elif info == -2:
                    avg_success_list.append(0.0)
                    avg_collision_list.append(0.0)
                    avg_derail_list.append(1.0)
                else:
                    avg_success_list.append(0.0)
                    avg_collision_list.append(0.0)
                    avg_derail_list.append(0.0)
                reset = True

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(
                        batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
                if prioritized_replay:
                    new_priorities = np.abs(errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
                # debug['pi'] = tf.Print(debug['pi'], [debug['pi'], "target pi"])
                # tf.Print(debug['mu'], [debug['mu'], "target mu"])
                # tf.Print(debug['sigma'], [debug['sigma'], "target sigma"])
                logger.record_tabular("Success rate", np.mean(avg_success_list))
                logger.record_tabular("Collision rate", np.mean(avg_collision_list))
                logger.record_tabular("Derailment rate", np.mean(avg_derail_list))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log(
                            "Saving model due to mean reward increase: {} -> {}"
                            .format(saved_mean_reward, mean_100ep_reward))
                    U.save_state(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(
                    saved_mean_reward))
            U.load_state(model_file)
    return ActWrapper(act, act_params)
def learn(env,
          q_func,
          lr=5e-4,
          max_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    q_func: (tf.Variable, int, str, bool) -> tf.Variable
        the model that takes the following inputs:
            observation_in: object
                the output of observation placeholder
            num_actions: int
                number of actions
            scope: str
            reuse: bool
                should be passed to outer variable scope
        and returns a tensor of shape (batch_size, num_actions) with values of every action.
    lr: float
        learning rate for adam optimizer
    max_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: True
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to max_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    callback: (locals, globals) -> None
        function called at every step with state of the algorithm.
        If callback returns true training stops.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model
    sess = tf.Session()
    sess.__enter__()

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph
    def make_obs_ph(name):
        return ObservationInput(env.observation_space, name=name)

    act, train, update_target, debug = build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise)

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = max_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True
    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td
        model_file = os.path.join(td, "model")
        model_saved = False
        if tf.train.latest_checkpoint(td) is not None:
            load_state(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True

        for t in range(max_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(
                    1. - exploration.value(t) +
                    exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
            action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
            env_action = action
            reset = False
            new_obs, rew, done, _ = env.step(env_action)
            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                episode_rewards.append(0.0)
                reset = True

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(
                        batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log(
                            "Saving model due to mean reward increase: {} -> {}"
                            .format(saved_mean_reward, mean_100ep_reward))
                    save_state(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(
                    saved_mean_reward))
            load_state(model_file)

    return act
def __init__(self, plabels, flabels, command, context_directory,
             psizes=None, fsizes=None, coupling=None, hosts=None, pool=None,
             clean=False, discover_pattern=None, save_dir=None,
             space_fname='sample-space.json', space_format='json',
             data_fname='sample-data.json', data_format='json'):
    """Initialize the provider.

    :param list(str) plabels: input parameter names.
    :param list(str) flabels: output feature names.
    :param str command: command to be executed for computing new snapshots.
    :param str context_directory: store every resource required for executing a job.
    :param list(int) psizes: number of components of parameters.
    :param list(int) fsizes: number of components of output features.
    :param dict coupling: definition of the snapshot IO files:

        - **coupling_directory** (str) -- sub-directory in ``context_directory``
          that will contain input parameters and output file.
        - **input_fname** (str) -- basename for files storing the point
          coordinates ``plabels``.
        - **input_format** (str) -- ``json`` (default), ``csv``, ``npy``, ``npz``.
        - **output_fname** (str) -- basename for files storing values associated
          to ``flabels``.
        - **output_format** (str) -- ``json`` (default), ``csv``, ``npy``, ``npz``.

    :param list(dict) hosts: definition of the remote HOSTS if any:

        - **hostname** (str) -- remote host to connect to.
        - **remote_root** (str) -- remote folder to create and store data.
        - **username** (str) -- username.
        - **password** (str) -- password.

    :param pool: pool executor.
    :type pool: :class:`concurrent.futures`.xxx.xxx.Executor.
    :param bool clean: whether to remove working directories.
    :param str discover_pattern: UNIX-style patterns for directories with pairs
        of sample files to import.
    :param str save_dir: path to a directory for saving known snapshots.
    :param str space_fname: name of space file to write.
    :param str data_fname: name of data file to write.
    :param str space_format: space file format.
    :param str data_format: data file format.
    """
    # discover existing snapshots
    self._cache = SampleCache(plabels, flabels, psizes, fsizes, save_dir,
                              space_fname, space_format, data_fname, data_format)
    if discover_pattern:
        self._cache.discover(discover_pattern)
        self._cache.save()
    self.safe_saved = False

    # job specification
    self._job = {
        'command': command,
        'context_directory': context_directory,
        'coupling_directory': 'batman-coupling',
        'input_fname': space_fname,
        'input_sizes': self.psizes,
        'input_labels': self.plabels,
        'input_format': space_format,
        'output_fname': data_fname,
        'output_sizes': self.fsizes,
        'output_labels': self.flabels,
        'output_format': data_format,
        'clean': clean,
    }
    if coupling is not None:
        self._job.update(coupling)
    self.logger.debug('Job specification: {}'.format(self._job))

    # execution
    if save_dir is not None:
        workdir = save_dir
    else:
        # NOTE: the TemporaryDirectory handle is local to __init__, so the
        # directory is removed as soon as `_tmp` is garbage collected.
        _tmp = tempfile.TemporaryDirectory()
        self._job['clean'] = True
        workdir = _tmp.name
    self.backupdir = os.path.join(workdir, '.backup')
    try:
        os.makedirs(self.backupdir)
    except OSError:
        self.logger.warning('Was not able to create backup directory')
    finally:
        self._cache_backup = copy.deepcopy(self._cache)

    if pool is not None:
        self._pool = pool
    if hosts is not None:
        self._executor = MasterRemoteExecutor(local_root=workdir,
                                              job=self._job, hosts=hosts)
    else:
        self._executor = LocalExecutor(local_root=workdir, **self._job)
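# Hypothetical usage sketch for the provider above: the class name and the
# SampleCache/executor wiring are not shown here, so this only illustrates the
# shapes of the `coupling` and `hosts` arguments documented in the docstring.
# All values (host name, paths, credentials) are placeholders.
coupling_example = {
    'coupling_directory': 'batman-coupling',
    'input_fname': 'sample-space.json',
    'input_format': 'json',
    'output_fname': 'sample-data.json',
    'output_format': 'json',
}
hosts_example = [
    {
        'hostname': 'cluster.example.com',
        'remote_root': '/scratch/batman-runs',
        'username': 'user',
        'password': 'secret',
    },
]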
def _zip_package(package_root, includes, excludes=None, dockerize_pip=False,
                 follow_symlinks=False, python_path=None,
                 requirements_files=None, use_pipenv=False, **kwargs):
    """Create zip file in memory with package dependencies.

    Args:
        package_root (str): Base directory to copy files from.
        includes (List[str]): Inclusion patterns. Only files matching those
            patterns will be included in the result.
        excludes (List[str]): Exclusion patterns. Files matching those
            patterns will be excluded from the result. Exclusions take
            precedence over inclusions.
        dockerize_pip (Union[bool, str]): Whether to use docker or under what
            conditions docker will be used to run ``pip``.
        follow_symlinks (bool): If true, symlinks will be included in the
            resulting zip file.
        python_path (Optional[str]): Explicit python interpreter to be used.
            pipenv must be installed and executable using ``-m`` if provided.
        requirements_files (Dict[str, bool]): Map of requirement file names
            and whether they exist.
        use_pipenv (bool): Whether to use pipenv to export a Pipfile as
            requirements.txt.
        kwargs (Any): Advanced options for subprocess and docker. See source
            code to determine what is supported.

    Returns:
        Tuple[str, str]: Content of the ZIP file as a byte string and
        calculated hash of all the files.

    """
    kwargs.setdefault("pipenv_timeout", 300)

    temp_root = os.path.join(os.path.expanduser("~"), ".runway_cache")
    if not os.path.isdir(temp_root):
        os.makedirs(temp_root)

    # exclude potential virtual environments in the package
    # (guard against the default of None and avoid mutating the caller's list)
    excludes = list(excludes or [])
    excludes.append(".venv/")

    with tempfile.TemporaryDirectory(prefix="cfngin", dir=temp_root) as tmpdir:
        copydir(package_root, tmpdir, includes, excludes, follow_symlinks)
        tmp_req = handle_requirements(
            package_root=package_root,
            dest_path=tmpdir,
            requirements=requirements_files,
            python_path=python_path,
            use_pipenv=use_pipenv,
            pipenv_timeout=kwargs["pipenv_timeout"],
        )

        if should_use_docker(dockerize_pip):
            dockerized_pip(tmpdir, **kwargs)
        else:
            tmp_script = Path(tmpdir) / "__runway_run_pip_install.py"
            pip_cmd = [
                python_path or sys.executable,
                "-m", "pip", "install",
                "--target", tmpdir,
                "--requirement", tmp_req,
                "--no-color",
            ]

            subprocess_args = {}
            if kwargs.get("python_dontwritebytecode"):
                subprocess_args["env"] = dict(os.environ, PYTHONDONTWRITEBYTECODE="1")

            # Pyinstaller build or explicit python path
            if getattr(sys, "frozen", False) and not python_path:
                script_contents = os.linesep.join([
                    "import runpy",
                    "from runway.util import argv",
                    "with argv(*{}):".format(json.dumps(pip_cmd[2:])),
                    '    runpy.run_module("pip", run_name="__main__")\n',
                ])
                # TODO remove python 2 logic when dropping python 2
                tmp_script.write_text(script_contents
                                      if sys.version_info.major > 2
                                      else script_contents.decode("UTF-8"))
                cmd = [sys.executable, "run-python", str(tmp_script)]
            else:
                if not _pip_has_no_color_option(pip_cmd[0]):
                    pip_cmd.remove("--no-color")
                cmd = pip_cmd

            LOGGER.info(
                "The following output from pip may include incompatibility "
                "errors. These can generally be ignored (pip will erroneously "
                "warn about conflicts between the packages in your Lambda zip "
                "and your host system).")
            try:
                subprocess.check_call(cmd, **subprocess_args)
            except subprocess.CalledProcessError:
                raise PipError
            finally:
                if tmp_script.is_file():
                    tmp_script.unlink()

        if kwargs.get("python_exclude_bin_dir") and os.path.isdir(
                os.path.join(tmpdir, "bin")):
            LOGGER.debug("Removing python /bin directory from Lambda files")
            shutil.rmtree(os.path.join(tmpdir, "bin"))
        if kwargs.get("python_exclude_setuptools_dirs"):
            for i in os.listdir(tmpdir):
                if i.endswith(".egg-info") or i.endswith(".dist-info"):
                    LOGGER.debug("Removing directory %s from Lambda files", i)
                    shutil.rmtree(os.path.join(tmpdir, i))

        req_files = _find_files(tmpdir, includes="**", follow_symlinks=False)
        return _zip_files(req_files, tmpdir)
def install(architecture, conflicts, depends, description, enhances, maintainer,
            manager, name, recommends, suggests, summary, version, module):
    if manager == 'apt':
        with apt.Cache() as cache:
            is_installed, is_virtual, installed_pkg = apt_package_status(name, cache)
        if is_installed and version == installed_pkg.version:
            # package is present already
            return False, conflicts, depends, enhances, recommends, suggests
        with tempfile.TemporaryDirectory() as dir:
            pkg_path = make_deb(
                architecture, conflicts, dir, depends, description, enhances,
                maintainer, manager, name, recommends, suggests, summary,
                version, module)
            cmd = "apt-get install -y '{pkg_path}'".format(pkg_path=pkg_path)
            module.run_command(cmd, check_rc=True, cwd=dir,
                               environ_update=dict_merge(ENV_VARS, APT_ENV_VARS))
        return True, conflicts, depends, enhances, recommends, suggests

    elif manager == 'dnf':
        with dnf.Base() as base:
            base.read_all_repos()
            base.fill_sack(load_system_repo=True, load_available_repos=False)
            q = base.sack.query()
            installed_pkgs = q.installed().filter(name=name, version=version).run()
            if installed_pkgs:
                # package is present already
                return False, conflicts, depends, enhances, recommends, suggests
            base.reset(repos=True, sack=True)
            base.conf.substitutions.update_from_etc(base.conf.installroot)
            base.read_all_repos()
            base.fill_sack(load_system_repo=True, load_available_repos=True)
            with tempfile.TemporaryDirectory() as dir:
                pkg_path = make_rpm(
                    architecture, conflicts, dir, depends, description, enhances,
                    maintainer, manager, name, recommends, suggests, summary,
                    version, module)
                # Install using dnf CLI
                # cmd = "dnf install -y '{pkg_path}'".format(pkg_path=pkg_path)
                # module.run_command(cmd, check_rc=True, cwd=dir, environ_update=ENV_VARS)
                # Install using dnf API
                pkgs = base.add_remote_rpms([pkg_path])
                for pkg in pkgs:
                    base.package_install(pkg)
                base.resolve()
                base.download_packages(base.transaction.install_set)
                base.do_transaction()
        return True, conflicts, depends, enhances, recommends, suggests

    elif manager == 'yum':
        yb = yum.YumBase()
        if yb.rpmdb.searchNevra(name=name, ver=version):
            # package is present already
            return False, conflicts, depends, enhances, recommends, suggests
        with tempfile.TemporaryDirectory() as dir:
            pkg_path = make_rpm(
                architecture, conflicts, dir, depends, description, enhances,
                maintainer, manager, name, recommends, suggests, summary,
                version, module)
            cmd = "yum install -y '{pkg_path}'".format(pkg_path=pkg_path)
            module.run_command(cmd, check_rc=True, cwd=dir, environ_update=ENV_VARS)
        return True, conflicts, depends, enhances, recommends, suggests

    # else manager not in ['apt', 'dnf', 'yum']
    return False, conflicts, depends, enhances, recommends, suggests
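# dict_merge is called above but not defined in this file; a minimal sketch of
# the behaviour the call site relies on (right-hand keys win, neither input is
# mutated). The real helper may differ, e.g. by merging recursively.
def dict_merge_sketch(base, extra):
    merged = dict(base)
    merged.update(extra)
    return merged

# Example: merge the generic env vars with apt-specific overrides.
# dict_merge_sketch({'LANG': 'C'}, {'DEBIAN_FRONTEND': 'noninteractive'})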
def test_two_files_written(self):
    with tempfile.TemporaryDirectory() as tmpdir:
        self.env_trans_set.write_cypher_files(tmpdir)
        target_dir = os.path.join(tmpdir, "succession")
        # assertTrue(len(...), 2) would treat 2 as the failure message and pass
        # for any non-empty directory; assertEqual actually checks the count.
        self.assertEqual(len(os.listdir(target_dir)), 2)
def setup_method(self):
    self.tempdir = tempfile.TemporaryDirectory()
    self.output_folder = self.tempdir.name
    folder = os.path.dirname(os.path.abspath(__file__))
    self.test_data_path = os.path.join(folder, "test_data", "test.seq")
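def teardown_method(self):
    # Assumed counterpart to setup_method (not shown in the original): clean up
    # the TemporaryDirectory explicitly instead of relying on its finalizer.
    self.tempdir.cleanup()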
def __init__(self, config, batch_size, checkpoint_dir_or_path=None,
             var_name_substitutions=None, session_target='', **sample_kwargs):
    if tf.gfile.IsDirectory(checkpoint_dir_or_path):
        checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir_or_path)
    else:
        checkpoint_path = checkpoint_dir_or_path
    self._config = copy.deepcopy(config)
    self._config.data_converter.set_mode('infer')
    self._config.hparams.batch_size = batch_size
    with tf.Graph().as_default():
        model = self._config.model
        model.build(
            self._config.hparams,
            self._config.data_converter.output_depth,
            is_training=False)

        # Input placeholders
        self._temperature = tf.placeholder(tf.float32, shape=())

        if self._config.hparams.z_size:
            self._z_input = tf.placeholder(
                tf.float32, shape=[batch_size, self._config.hparams.z_size])
        else:
            self._z_input = None

        if self._config.data_converter.control_depth > 0:
            self._c_input = tf.placeholder(
                tf.float32, shape=[None, self._config.data_converter.control_depth])
        else:
            self._c_input = None

        self._inputs = tf.placeholder(
            tf.float32,
            shape=[batch_size, None, self._config.data_converter.input_depth])
        self._controls = tf.placeholder(
            tf.float32,
            shape=[batch_size, None, self._config.data_converter.control_depth])
        self._inputs_length = tf.placeholder(
            tf.int32,
            shape=[batch_size] + list(self._config.data_converter.length_shape))
        self._max_length = tf.placeholder(tf.int32, shape=())

        # Outputs
        self._outputs, self._decoder_results = model.sample(
            batch_size,
            max_length=self._max_length,
            z=self._z_input,
            c_input=self._c_input,
            temperature=self._temperature,
            **sample_kwargs)
        if self._config.hparams.z_size:
            q_z = model.encode(self._inputs, self._inputs_length, self._controls)
            self._mu = q_z.loc
            self._sigma = q_z.scale.diag
            self._z = q_z.sample()

        var_map = None
        if var_name_substitutions is not None:
            var_map = {}
            for v in tf.global_variables():
                var_name = v.name[:-2]  # Strip ':0' suffix.
                for pattern, substitution in var_name_substitutions:
                    var_name = re.sub(pattern, substitution, var_name)
                if var_name != v.name[:-2]:
                    tf.logging.info('Renaming `%s` to `%s`.', v.name[:-2], var_name)
                var_map[var_name] = v

        # Restore graph
        self._sess = tf.Session(target=session_target)
        saver = tf.train.Saver(var_map)
        if os.path.exists(checkpoint_path) and tarfile.is_tarfile(checkpoint_path):
            tf.logging.info('Unbundling checkpoint.')
            with tempfile.TemporaryDirectory() as temp_dir:
                tar = tarfile.open(checkpoint_path)
                tar.extractall(temp_dir)
                # Assume only a single checkpoint is in the directory.
                for name in tar.getnames():
                    if name.endswith('.index'):
                        checkpoint_path = os.path.join(temp_dir, name[0:-6])
                        break
                saver.restore(self._sess, checkpoint_path)
        else:
            saver.restore(self._sess, checkpoint_path)
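# Small illustration of the var_name_substitutions mechanism above: every
# (pattern, substitution) pair is applied to each variable name with re.sub,
# and the variable is restored from the checkpoint under the rewritten name.
# The pair below is a made-up example, not a mapping used by any real model.
import re

substitutions = [(r'^encoder/', 'vae_encoder/')]
var_name = 'encoder/rnn/kernel'
for pattern, substitution in substitutions:
    var_name = re.sub(pattern, substitution, var_name)
assert var_name == 'vae_encoder/rnn/kernel'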
def test_opendss_writer():
    from ditto.writers.opendss.write import Writer
    from ditto.models.node import Node
    from ditto.models.line import Line
    from ditto.models.load import Load
    from ditto.models.regulator import Regulator
    from ditto.models.wire import Wire
    from ditto.models.capacitor import Capacitor
    from ditto.models.powertransformer import PowerTransformer
    from ditto.models.winding import Winding

    # from ditto.model import Model
    from ditto.store import Store
    from ditto.models.storage import Storage
    from ditto.models.phase_storage import PhaseStorage
    from ditto.models.base import Unicode
    from ditto.models.base import Float
    from ditto.models.power_source import PowerSource
    from ditto.models.phase_load import PhaseLoad

    m = Store()
    node1 = Node(m, name="n1")
    node2 = Node(m, name="n2")
    node3 = Node(m, name="n3")
    wirea = Wire(m, gmr=1.3, X=2, Y=20)
    wiren = Wire(m, gmr=1.2, X=2, Y=20)
    line1 = Line(m, name="l1", wires=[wirea, wiren])
    phase_load1 = PhaseLoad(m, p=5400, q=2615.3394)
    load1 = Load(m, name="load1", phase_loads=[phase_load1])
    winding1 = Winding(
        m,
        connecting_element="n2",
        connection_type="W",
        num_phases=3,
        nominal_voltage=12.47,
        rated_power=25,
        tap_percentage=1,
    )
    winding2 = Winding(
        m,
        connecting_element="l1",
        connection_type="W",
        num_phases=3,
        nominal_voltage=6.16,
        rated_power=25,
        tap_percentage=1.2,
    )
    transformer1 = PowerTransformer(
        m,
        name="t1",
        from_element="n2",
        to_element="n3",
        windings=[winding1, winding2],
        feeder_name="f1",
    )
    transformer1.reactances.append(6)
    reg1 = Regulator(
        m,
        name="t1_reg",
        connected_transformer="t1",
        connected_winding=2,
        pt_ratio=60,
        delay=2,
    )
    cap1 = Capacitor(
        m,
        name="cap1",
        connecting_element="n2",
        num_phases=3,
        nominal_voltage=7.2,
        var=300,
        connection_type="Y",
    )
    print(line1.impedance_matrix)

    # Storage testing
    phase_storage_A = PhaseStorage(m, phase="A", p=15.0, q=5.0)
    phase_storage_B = PhaseStorage(m, phase="B", p=16.0, q=6.0)
    storage = Storage(
        m,
        name="store1",
        connecting_element="n3",
        nominal_voltage=12470.0,
        rated_power=10000.0,
        rated_kWh=100.0,
        stored_kWh=75.5,
        reserve=20.0,
        discharge_rate=25.0,
        charge_rate=18.7,
        charging_efficiency=15.3,
        discharging_efficiency=22.0,
        resistance=20,
        reactance=10,
        model_=1,
        phase_storages=[phase_storage_A, phase_storage_B],
    )

    # PV systems testing
    PV_system = PowerSource(
        m,
        name="PV1",
        is_sourcebus=0,
        nominal_voltage=12470,
        phases=[Unicode("A"), Unicode("C")],
        rated_power=20000.0,
        connection_type="D",
        cutout_percent=30.0,
        cutin_percent=15.3,
        resistance=14.0,
        reactance=5.2,
        v_max_pu=100,
        v_min_pu=60,
        power_factor=0.9,
    )

    t = tempfile.TemporaryDirectory()
    writer = Writer(output_path=t.name)

    # writer.write_wiredata(m)
    # writer.write_linegeometry(m)
    # writer.write_linecodes(m)
    writer.write_storages(m)
    writer.write_PVs(m)
    writer.write_lines(m)
    writer.write_loads(m)
    writer.write_transformers(m)
    writer.write_regulators(m)
    writer.write_capacitors(m)