def test_1_serial_stomp():
    from pykern.pkdebug import pkdp, pkdpretty
    from pykern.pkunit import pkfail, pkok
    from sirepo import srunit
    import copy

    fc = srunit.flask_client()
    sim_type = 'srw'
    fc.get('/{}'.format(sim_type))
    data = fc.sr_post('listSimulations', {'simulationType': sim_type})
    for youngs in data:
        if youngs['name'] == "Young's Double Slit Experiment":
            break
    else:
        pkfail("{}: Young's not found", pkdpretty(data))
    data = fc.sr_get(
        'simulationData',
        {
            'simulation_type': sim_type,
            'pretty': '0',
            'simulation_id': youngs['simulationId'],
        },
    )
    prev_serial = data['models']['simulation']['simulationSerial']
    prev_data = copy.deepcopy(data)
    # _MIN_SERIAL is a module-level constant defined elsewhere in this test file
    pkok(
        prev_serial > _MIN_SERIAL,
        '{}: serial must be greater than {}',
        prev_serial,
        _MIN_SERIAL,
    )
    data['models']['beamline'][4]['position'] = '61'
    curr_data = fc.sr_post('saveSimulationData', data)
    curr_serial = curr_data['models']['simulation']['simulationSerial']
    pkok(
        prev_serial < curr_serial,
        '{}: serial not incremented, still < {}',
        prev_serial,
        curr_serial,
    )
    # Saving stale data (old serial) must be rejected by the server
    prev_data['models']['beamline'][4]['position'] = '60.5'
    failure = fc.sr_post('saveSimulationData', prev_data)
    pkok(
        failure['error'] == 'invalidSerial',
        '{}: unexpected status, expected serial failure',
        failure,
    )
    # Saving the current (fresh) data must succeed and bump the serial again
    curr_data['models']['beamline'][4]['position'] = '60.5'
    curr_serial = curr_data['models']['simulation']['simulationSerial']
    new_data = fc.sr_post('saveSimulationData', curr_data)
    new_serial = new_data['models']['simulation']['simulationSerial']
    pkok(
        curr_serial < new_serial,
        '{}: serial not incremented, still < {}',
        new_serial,
        curr_serial,
    )
def test_pkdpretty():
    """Pretty print arbitrary objects"""
    from pykern.pkdebug import pkdpretty

    recursive = []
    any_obj = object()
    recursive.append(recursive)
    for obj, expect in (
        (u'{"a":1}', '{\n    "a": 1\n}\n'),
        ('{"a":1}', '{\n    "a": 1\n}\n'),
        ({'b': set([1])}, "{ 'b': set([1])}\n"),
        (recursive, recursive),
        (any_obj, any_obj),
    ):
        assert expect == pkdpretty(obj)
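# A minimal usage sketch (not from the original test suite): pkdpretty is
# typically combined with pkdp so structured state shows up readably in the
# debug log. The payload below is illustrative only.
def _example_pkdpretty_logging():
    from pykern.pkdebug import pkdp, pkdpretty

    payload = {'simulationType': 'srw', 'models': {'simulation': {'simulationSerial': 1}}}
    # Non-JSON-able objects are returned unchanged (see the recursive and
    # any_obj cases above), so this is safe to call on arbitrary values.
    pkdp('request={}', pkdpretty(payload))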
def assert_object_with_json(basename, actual):
    """Converts actual to JSON and compares with data_dir/basename.json

    Reads data_dir/basename.json and compares it with actual converted
    to JSON. The trailing newline is managed properly, keys are sorted,
    and indentation is 4. actual is written to work_dir.

    Args:
        basename (str): file to be found in data_dir with json suffix
        actual (object): to be serialized as json
    """
    actual = pkdpretty(actual)
    fn = '{}.json'.format(basename)
    pkio.write_text(work_dir().join(fn), actual)
    expect = pkio.read_text(data_dir().join(fn))
    assert expect == actual, \
        '{}: unexpected result'.format(basename)
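# A hedged usage sketch of the helper above: the test name, fixture data, and
# 'list_simulations' basename are hypothetical. The expected output would live
# at data_dir()/list_simulations.json; the actual JSON is written to work_dir()
# so a failing run can be diffed by hand.
def test_list_simulations_json():
    from pykern import pkunit

    resp = {'name': "Young's Double Slit Experiment", 'simulationSerial': 1}
    pkunit.assert_object_with_json('list_simulations', resp)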
def sr_sim_data(self, sim_type, sim_name):
    """Return simulation data by name

    Args:
        sim_type (str): app
        sim_name (str): case sensitive name

    Returns:
        dict: data
    """
    data = self.sr_post('listSimulations', {'simulationType': sim_type})
    for d in data:
        if d['name'] == sim_name:
            break
    else:
        pkunit.pkfail('{}: not found in {}', sim_name, pkdpretty(data))
    return self.sr_get(
        'simulationData',
        {
            'simulation_type': sim_type,
            'pretty': '0',
            'simulation_id': d['simulationId'],
        },
    )
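# A hedged sketch (not from the original source) showing how sr_sim_data
# simplifies the lookup done by hand in test_1_serial_stomp above: fetch
# Young's Double Slit by name instead of scanning listSimulations manually.
def test_sim_data_lookup():
    from sirepo import srunit

    fc = srunit.flask_client()
    fc.get('/srw')
    data = fc.sr_sim_data('srw', "Young's Double Slit Experiment")
    assert data['models']['simulation']['simulationSerial'] > 0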
def _init_hosts_slots_balance():
    """Balance sequential and parallel slot counts"""
    global _hosts_ordered

    def _host_cmp(a, b):
        """This (local) host will get (first) sequential slots.

        Sequential slots are "faster" and don't go over NFS (usually)
        so the interactive jobs will be more responsive (hopefully).

        We don't assign sequential slots randomly, but in fixed order.
        This helps reproduce bugs, because you know the first host is
        the sequential host. Slots are then randomized for execution.
        """
        if a.remote_ip == a.local_ip:
            return -1
        if b.remote_ip == b.local_ip:
            return +1
        return cmp(a.name, b.name)

    def _ratio_not_ok():
        """Minimum sequential job slots should be 40% of total"""
        mp = 0
        ms = 0
        for h in _hosts.values():
            mp += h.num_slots.parallel
            ms += h.num_slots.sequential
        if mp + ms == 1:
            # Edge case where ratio calculation can't work (only dev)
            h = _hosts.values()[0]
            h.num_slots.sequential = 1
            h.num_slots.parallel = 1
            return False
        # Must be at least one parallel slot
        if mp <= 1:
            return False
        #TODO(robnagler) needs to be more complex, because could have many more
        # parallel nodes than sequential, which doesn't need to be so large. This
        # is a good guess for reasonable configurations.
        r = float(ms) / (float(mp) + float(ms))
        return r < 0.4

    _hosts_ordered = sorted(_hosts.values(), cmp=_host_cmp)
    while _ratio_not_ok():
        for h in _hosts_ordered:
            # Balancing consists of making the first host have
            # all the sequential jobs, then the next host. This
            # is a guess at the best way to distribute sequential
            # vs parallel jobs.
            if h.num_slots.parallel > 0:
                # convert a parallel slot on first available host
                h.num_slots.sequential += _parallel_cores
                h.num_slots.parallel -= 1
                break
        else:
            raise AssertionError(
                'should never get here: {}'.format(pkdpretty(_hosts)),
            )
    for h in _hosts_ordered:
        pkdlog(
            '{}: parallel={} sequential={}',
            h.name,
            h.num_slots.parallel,
            h.num_slots.sequential,
        )
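# A small illustrative check (assumed numbers, not from the original module) of
# the 40% rule enforced by _ratio_not_ok above: with 6 parallel and 2
# sequential slots the sequential share is 2/8 = 25%, so balancing keeps
# converting parallel slots to sequential ones until the sequential share
# reaches at least 40% of the total.
def _example_ratio_check():
    mp, ms = 6, 2
    r = float(ms) / (float(mp) + float(ms))
    assert r < 0.4  # 0.25 < 0.4, so _ratio_not_ok() would return True here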