def run_and_compare(arguments, input_dir, ref_gro, ref_top,
                    ref_stdout, ref_stderr, runner=_run_external):
    """
    Run insane and compare its output against a reference.
    """
    # Build the command as a list for subprocess.Popen.
    # The arguments can be passed to this function as a string or as a list
    # of arguments. If they are passed as a string, they need to be
    # converted to a list.
    arguments = _arguments_as_list(arguments)

    # The name of the output gro file must be provided to insane for insane
    # to work. Since we also need that file name, let's get it from insane's
    # arguments.
    gro_output = _output_from_arguments(arguments, option='-o')
    if ref_top is not None:
        top_output = _output_from_arguments(arguments, option='-p')

    # We want insane to run in a temporary directory. This keeps the file
    # system clean, and it avoids mixing the output of different tests.
    with utils.tempdir():
        out, err, returncode = run_insane(arguments, input_dir, runner=runner)
        assert not returncode
        assert os.path.exists(gro_output)
        if os.path.splitext(gro_output)[-1] == '.gro':
            utils.assert_gro_equal(gro_output, ref_gro)
        else:
            compare(gro_output, ref_gro)
        compare(utils.ContextStringIO(out), ref_stdout)
        compare(utils.ContextStringIO(err), ref_stderr)
        if ref_top is not None:
            compare(top_output, ref_top)

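# `run_and_compare` above and its variants below lean on two small helpers.
# Minimal sketches of both, assuming `_arguments_as_list` merely normalizes
# a command line given as a string into a token list, and that
# `_output_from_arguments` returns the value following an option and raises
# ValueError when the option is absent (as the try/except in
# generate_simple_case_references below suggests); the real implementations
# may differ:
import shlex

def _arguments_as_list(arguments):
    # Accept either a shell-like string or an iterable of arguments.
    if isinstance(arguments, str):
        return shlex.split(arguments)
    return list(arguments)

def _output_from_arguments(arguments, option='-o'):
    # Return the token that follows `option` on the command line.
    try:
        return arguments[arguments.index(option) + 1]
    except (ValueError, IndexError):
        raise ValueError('Option {} has no value in {}'.format(option, arguments))
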
def setUp(self):
    self.temp_dir = utils.tempdir()
    self.num_events = 100
    self.start_time = datetime.datetime.now()
    self.end_time = self.start_time + datetime.timedelta(seconds=self.num_events)
    self.summary_writer = SummaryWriter(self.temp_dir, filename_suffix="tmp")
    self.event_file_path = self.summary_writer.file_writer.event_writer._file_name

def test_utils_tempdir(self):
    oldcwd = os.getcwd()
    with utils.tempdir():
        tmp_cwd = os.getcwd()
        self.assertEqual('/tmp', os.path.dirname(tmp_cwd))
    self.assertEqual(os.getcwd(), oldcwd)
    self.assertFalse(os.path.exists(tmp_cwd))

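# The test above pins down the contract of `utils.tempdir`: it creates a
# directory under /tmp, makes it the current working directory for the
# duration of the `with` block, then restores the previous working directory
# and removes the temporary directory. A minimal sketch of such a context
# manager under those assumptions (the real implementation may differ):
import contextlib
import os
import shutil
import tempfile

@contextlib.contextmanager
def tempdir(prefix='tmp'):
    old_cwd = os.getcwd()
    tmp_dir = tempfile.mkdtemp(prefix=prefix, dir='/tmp')
    os.chdir(tmp_dir)
    try:
        yield tmp_dir
    finally:
        # Restore the caller's working directory and clean up.
        os.chdir(old_cwd)
        shutil.rmtree(tmp_dir)
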
def start(start_with, depth, process_num, file_path):
    """
    Create (at most) `process_num` processes to run producers, which put
    responses into a shared queue. Consumers then aggregate the results.

    :param file_path: the path of the input file
    :param start_with: the starting part of the metric names
    :param depth: how many levels in the nested dictionary
    :param process_num: the number of producer processes to run
    :return: consumers.subparts_to_counts
    """
    with tempdir() as tmp_dir:
        part_paths = partitioner.split_file_into_parts(file_path, process_num, tmp_dir)
        queue = Queue(maxsize=10000)
        processes = []
        for part_path in part_paths:
            p = Process(target=create_and_run_producers, args=(queue, part_path))
            p.daemon = True
            p.start()
            processes.append(p)
        consumers = Consumers(queue, start_with, depth)
        consumer_thread = threading.Thread(target=consumers.start,
                                           args=(len(part_paths),))
        consumer_thread.daemon = True
        consumer_thread.start()
        for p in processes:
            p.join()
        consumer_thread.join()
        return consumers.subparts_to_counts

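# Both `start` above and the master's `main` at the end of this section call
# `split_file_into_parts(file_path, num_parts, output_dir)`. A minimal
# sketch of what such a partitioner might do, assuming it splits the input
# by lines into roughly equal chunk files and returns their paths (the file
# name pattern is made up; the real implementation may differ):
import os

def split_file_into_parts(file_path, num_parts, output_dir):
    with open(file_path) as infile:
        lines = infile.readlines()
    chunk_size = max(1, -(-len(lines) // num_parts))  # ceiling division
    part_paths = []
    for start_index in range(0, len(lines), chunk_size):
        part_path = os.path.join(output_dir,
                                 'part_{}.tmp'.format(len(part_paths)))
        with open(part_path, 'w') as outfile:
            outfile.writelines(lines[start_index:start_index + chunk_size])
        part_paths.append(part_path)
    return part_paths
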
def generate_simple_case_references():
    """
    Run insane to generate reference files for the simple regression tests.

    Run insane with the arguments listed in SIMPLE_TEST_CASES. The output
    GRO file, the standard output, and the standard error are stored in the
    DATA_DIR/simple_case directory.
    """
    for case in SIMPLE_TEST_CASES:
        case_args, input_dir, alias = _split_case(case)
        arguments = _arguments_as_list(case_args)
        out_gro = _output_from_arguments(arguments, option='-o')
        try:
            out_top = _output_from_arguments(arguments, option='-p')
        except ValueError:
            out_top = None
        ref_gro, ref_top, ref_stdout, ref_stderr = _reference_path(
            case_args, alias)
        with utils.tempdir():
            print(INSANE + ' ' + ' '.join(arguments))
            out, err, _ = run_insane(arguments, input_dir)
            with open(ref_stdout, 'w') as outfile:
                for line in out:
                    print(line, file=outfile, end='')
            with open(ref_stderr, 'w') as outfile:
                for line in err:
                    print(line, file=outfile, end='')
            shutil.copy2(out_gro, ref_gro)
            if out_top is not None:
                shutil.copy2(out_top, ref_top)

def test_equal(self):
    """
    Make sure that identical files do not fail.
    """
    with utils.tempdir():
        with open('ref.gro', 'w') as outfile:
            print(textwrap.dedent(self.ref_gro_content), file=outfile, end='')
        utils.assert_gro_equal('ref.gro', 'ref.gro')

def test_multihash_main(self):
    ok, _, out, _ = utils.run(['md5sum'] + self.files_to_hash, out=True)
    self.assertTrue(ok)
    with utils.tempdir():
        multihash.main(['-v', '-f', '-d', os.getcwd()] + self.files_to_hash)
        cksum_fn = os.path.join(os.getcwd(), 'MD5SUMS')
        with open(cksum_fn, 'rb') as md5f:
            self.assertEqual(md5f.read(), out)
        with self.assertRaises(ValueError):
            multihash.main(['-d', os.getcwd()] + self.files_to_hash)

def _get_classification_trainer(self):
    tmp_checkpoint_dir = utils.tempdir()
    checkpoint_callback = pl.callbacks.ModelCheckpoint(
        tmp_checkpoint_dir, 'val/accuracy', mode='max')
    early_stop_callback = pl.callbacks.EarlyStopping('val/accuracy', mode='max')
    trainer = pl.Trainer(logger=False,
                         max_epochs=20,
                         checkpoint_callback=checkpoint_callback,
                         early_stop_callback=early_stop_callback,
                         progress_bar_refresh_rate=0)
    return trainer

def test_multihash_multisum(self):
    ok, _, out, _ = utils.run(['md5sum'] + self.files_to_hash, out=True)
    self.assertTrue(ok)

    class args(object):
        def __init__(self, directory, force):
            self.force = force
            self.directory = directory

    with utils.tempdir():
        anargs = args(os.getcwd(), True)
        multihash.multisum(self.computed, anargs)
        cksum_fn = os.path.join(anargs.directory, 'MD5SUMS')
        with open(cksum_fn, 'rb') as md5f:
            self.assertEqual(md5f.read(), out)
        anargs = args(os.getcwd(), False)
        with self.assertRaises(ValueError):
            multihash.multisum(self.computed, anargs)

def run_python(exercise, check):
    """Run exercise python code, and then check it with check code.

    Returns the same value as `exerciser.run_exercise`.
    """
    # We write the exercise and check code to files in a new temporary
    # directory, then invoke exerciser.run_exercise.
    with tempdir(prefix='choosy-') as tmpdir:
        with change_dir(tmpdir):
            with open("exercise.py", "w") as f:
                f.write(exercise)
            with open("check.py", "w") as f:
                f.write(check)
            if USE_PYPY:
                return run_exercise_in_sandbox(tmpdir)
            else:
                return exerciser.run_exercise(tmpdir)

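# `run_python` pairs `tempdir` with a separate `change_dir` helper, so this
# `tempdir` apparently only creates the directory without chdir-ing into it.
# A minimal sketch of `change_dir`, assuming it is a plain chdir/restore
# context manager (the real implementation may differ):
import contextlib
import os

@contextlib.contextmanager
def change_dir(new_dir):
    old_dir = os.getcwd()
    os.chdir(new_dir)
    try:
        yield new_dir
    finally:
        os.chdir(old_dir)
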
def test_diff_in_tolerance(self):
    """
    Make sure that small errors in coordinates are not caught.
    """
    gro_content = """\
    INSANE! Membrane UpperLeaflet>POPC=1 LowerLeaflet>POPC=1
    4
        1POPC   NC3    1   2.111  14.647  11.951
        1POPC   PO4    2   2.177  14.644  11.651
        1POPC   GL1    3   2.128  14.642  11.352  # Is within tolerance
        1POPC   GL2    4   1.961  14.651  11.351
    10 10 10"""
    with utils.tempdir():
        with open('ref.gro', 'w') as outfile:
            print(textwrap.dedent(self.ref_gro_content), file=outfile, end='')
        with open('content.gro', 'w') as outfile:
            print(textwrap.dedent(gro_content), file=outfile, end='')
        utils.assert_gro_equal('content.gro', 'ref.gro')

def test_diff_field(self):
    """
    Make sure that a difference in a field is caught.
    """
    gro_content = """\
    INSANE! Membrane UpperLeaflet>POPC=1 LowerLeaflet>POPC=1
    4
        1POPC   NC3    1   2.111  14.647  11.951
        1DIFF   PO4    2   2.177  14.644  11.651
        1POPC   GL1    3   2.128  14.642  11.351
        1POPC   GL2    4   1.961  14.651  11.351
    10 10 10"""
    with utils.tempdir():
        with open('ref.gro', 'w') as outfile:
            print(textwrap.dedent(self.ref_gro_content), file=outfile, end='')
        with open('content.gro', 'w') as outfile:
            print(textwrap.dedent(gro_content), file=outfile, end='')
        assert_raises(AssertionError, utils.assert_gro_equal,
                      'content.gro', 'ref.gro')

def test_diff_title(self):
    """
    Make sure that a different title is caught.
    """
    gro_content = """\
    A different title
    4
        1POPC   NC3    1   2.111  14.647  11.951
        1POPC   PO4    2   2.177  14.644  11.651
        1POPC   GL1    3   2.128  14.642  11.351
        1POPC   GL2    4   1.961  14.651  11.351
    10 10 10"""
    with utils.tempdir():
        with open('ref.gro', 'w') as outfile:
            print(textwrap.dedent(self.ref_gro_content), file=outfile, end='')
        with open('content.gro', 'w') as outfile:
            print(textwrap.dedent(gro_content), file=outfile, end='')
        assert_raises(AssertionError, utils.assert_gro_equal,
                      'content.gro', 'ref.gro')

def run_and_compare(arguments, input_dir, ref_dir, runner):
    """
    Run insane and compare its output against a reference.
    """
    # Build the command as a list for subprocess.Popen.
    # The arguments can be passed to this function as a string or as a list
    # of arguments. If they are passed as a string, they need to be
    # converted to a list.
    arguments = _arguments_as_list(arguments)

    ref_stdout = os.path.join(ref_dir, 'stdout')
    ref_stderr = os.path.join(ref_dir, 'stderr')

    # We want insane to run in a temporary directory. This keeps the file
    # system clean, and it avoids mixing the output of different tests.
    with utils.tempdir():
        out, err, returncode = run_insane(arguments, input_dir, runner=runner)
        assert not returncode
        compare(utils.ContextStringIO(out), ref_stdout)
        compare(utils.ContextStringIO(err), ref_stderr)
        compare_directories('./', ref_dir, ignore=('stderr', 'stdout', 'testlog'))

def main():
    with tempdir() as tmp_dir:
        worker = WorkerServer(tmp_dir)
        listening = Thread(target=worker.server.serve_forever)
        listening.daemon = True
        processing = Thread(target=worker.process_file)
        processing.daemon = True
        sending = Thread(target=worker.send_result_to_master)
        sending.daemon = True

        listening.start()
        log.info('worker server starts up, listening on port {}'.format(PORT_NUM))
        processing.start()
        sending.start()

        to_stop_server.wait(timeout=WORKER_MAX_LIVING_TIME)
        if not to_stop_server.is_set():
            log.warning('waited too long for the shutdown signal from master, '
                        'shutting down automatically')
            to_stop_server.set()
        worker.server.shutdown()
        worker.server.socket.close()

def run_and_compare(arguments, input_dir, ref_gro, ref_top,
                    ref_stdout, ref_stderr, runner=_run_external):
    """
    Run the program and compare its output against a reference.
    """
    # Build the command as a list for subprocess.Popen.
    # The arguments can be passed to this function as a string or as a list
    # of arguments. If they are passed as a string, they need to be
    # converted to a list.
    arguments = _arguments_as_list(arguments)

    # The name of the output file must be provided for the program to work.
    # Since we also need that file name, let's get it from the program's
    # arguments.
    ## Magic here -- Expect DRAGONS
    gro_output = _output_from_arguments(arguments, option='-o')
    if ref_top is not None:
        top_output = _output_from_arguments(arguments, option='-p')

    # We want the program to run in a temporary directory. This keeps the
    # file system clean, and it avoids mixing the output of different tests.
    with utils.tempdir():
        out, err, returncode = run_program(arguments, input_dir, runner=runner)
        assert not returncode
        assert os.path.exists(gro_output)
        if os.path.splitext(gro_output)[-1] == '.gro':
            utils.assert_gro_equal(gro_output, ref_gro)
        else:
            compare(gro_output, ref_gro)
        compare(utils.ContextStringIO(out), ref_stdout)
        compare(utils.ContextStringIO(err), ref_stderr)
        if ref_top is not None:
            compare(top_output, ref_top)

def test_diff_natoms(self):
    """
    Make sure that a difference in the number of atoms is caught.
    """
    gro_content = """\
    INSANE! Membrane UpperLeaflet>POPC=1 LowerLeaflet>POPC=1
    6
        1POPC   NC3    1   2.111  14.647  11.951
        1POPC   PO4    2   2.177  14.644  11.651
        1POPC   GL1    3   2.128  14.642  11.351
        1POPC   GL2    4   1.961  14.651  11.351
        1POPC   C1A    5   2.125  14.651  11.051
        1POPC   D2A    6   2.134  14.602  10.751
    10 10 10"""
    with utils.tempdir():
        with open('ref.gro', 'w') as outfile:
            print(textwrap.dedent(self.ref_gro_content), file=outfile, end='')
        with open('content.gro', 'w') as outfile:
            print(textwrap.dedent(gro_content), file=outfile, end='')
        assert_raises(AssertionError, utils.assert_gro_equal,
                      'content.gro', 'ref.gro')

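# Taken together, the tests above (test_equal, test_diff_in_tolerance,
# test_diff_field, test_diff_title, test_diff_natoms) pin down the contract
# of `utils.assert_gro_equal`: the title, the number of atoms, and the
# fixed-width atom fields must match exactly, while coordinates may differ
# by a small tolerance. A minimal sketch under those assumptions, using the
# standard GRO column layout (%5d%-5s%5s%5d followed by three %8.3f
# coordinates); the real implementation may differ:
def assert_gro_equal(path, ref_path, tolerance=0.001):
    with open(path) as current_file, open(ref_path) as ref_file:
        current = current_file.read().splitlines()
        reference = ref_file.read().splitlines()
    assert current[0] == reference[0], 'Titles differ'
    assert int(current[1]) == int(reference[1]), 'Numbers of atoms differ'
    natoms = int(reference[1])
    for line, ref_line in zip(current[2:2 + natoms], reference[2:2 + natoms]):
        # Residue number and name, atom name and number: exact match.
        assert line[:20] == ref_line[:20], 'Atom fields differ'
        # x, y, z occupy three 8-character columns starting at column 20.
        for offset in range(20, 44, 8):
            coord = float(line[offset:offset + 8])
            ref_coord = float(ref_line[offset:offset + 8])
            assert abs(coord - ref_coord) <= tolerance, 'Coordinates differ'
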
def rollout_classroom(self, classroom):
    if not classroom:
        raise Exception('No classroom given')
    try:
        with utils.tempdir("virtesk-virtroom-rollout-") as temp_dir:
            self.logger.info(
                "Starting to roll out classroom '{0}'".format(classroom))

            # Get the VMs for the given classroom.
            vms_for_classroom = self.rhev_lib.get_vms_for_classroom(classroom)
            for vmconfig in vms_for_classroom:
                self.logger.debug(str(vmconfig))

            # Make sure the VMs don't exist yet (avoiding conflicts).
            if self.rhev_lib.check_if_vms_exist(vms_for_classroom):
                self.logger.error(
                    "Some VMs already exist. Please delete them first")
                sys.exit(-1)

            # Create the (template) VMs.
            for vm in vms_for_classroom:
                self.rhev_lib.create_standalone_vm(vm)
            # sys.exit(0)
            time.sleep(constants.VM_CREATION_SLEEP_TIME)

            # Wait for the VMs to shut down.
            self.rhev_lib.wait_for_vms_down(
                vmconfigs=vms_for_classroom,
                formatstring="VM {0} successfully created")

            # Add a NIC and initiate sysprep.
            for vm in vms_for_classroom:
                self.rhev_lib.add_vm_nic(vm)
                self.rhev_lib.sysprep_vm(vm, temp_dir)

            self.logger.info('Waiting for sysprep to finish')
            time.sleep(constants.VM_SLEEP_TIME)

            # Wait for the VMs to shut down.
            msg_formatstring = \
                "VM {0} has been stopped after running Autounattend.xml."
            self.rhev_lib.wait_for_vms_down(vmconfigs=vms_for_classroom,
                                            formatstring=msg_formatstring)

            # Postprocess the VMs: eject ISOs, set them stateless, and add
            # the (user-) group. Create a snapshot of every VM.
            for vm in vms_for_classroom:
                self.rhev_lib.postprocess_vm(vm)

            # Wait for all VM snapshots to become ready.
            self.rhev_lib.wait_for_vm_snapshots_ready(vms_for_classroom)

            # Start the VMs.
            for vm in vms_for_classroom:
                self.rhev_lib.start_vm_after_rollout(vm)

            self.logger.info(
                "Finished rolling out classroom '{0}' successfully".format(
                    classroom))
    except Exception as e:
        logging.exception(e)
        raise Exception("Rolling out classroom '{0}' failed: {1}".format(
            classroom, e))

def main():
    start_with = sys.argv[1] if len(sys.argv) > 1 else ""
    workers = config.workers_url()
    start_time = time.time()
    log.info('master starts, start_with is "{}", workers are {}'.format(
        start_with, workers))

    with tempdir() as tmp_dir:
        all_metrics_file_name = 'all_metric_names.tmp'
        with open('{}/{}'.format(tmp_dir, all_metrics_file_name), 'w') \
                as all_metrics_file:
            get_all_metrics_into_file(start_with, all_metrics_file)
        parts = split_file_into_parts(file_path=all_metrics_file.name,
                                      num_parts=len(workers) * WORKER_JOB_NUM,
                                      output_dir=tmp_dir)
        log.info('partition finishes, all jobs are: {}'.format(parts))
        jobs = [Job(start_with, part) for part in parts]

        # Master preparation.
        master = MasterServer(workers, jobs)

        # Set up the threads.
        listening = Thread(target=master.server.serve_forever)
        listening.daemon = True
        sending = Thread(target=master.register_requests)
        sending.daemon = True
        checking = Thread(target=master.scan_requests)
        checking.daemon = True
        heartbeat = Thread(target=master.heartbeats)
        heartbeat.daemon = True

        listening.start()
        log.info('master server starts up, listening on port {}'.format(PORT_NO))
        sending.start()
        checking.start()
        heartbeat.start()

        # Wait for the results from the workers.
        results = []
        while len(results) < len(jobs):
            try:
                result = master.results.get(timeout=MAX_JOB_INTERVAL)
            except Empty:
                log.error('master waited too long for a result, shutting down')
                exit(1)
            results.append(result)

        # All work is done; shut down the servers.
        for worker in workers:
            try:
                log.info('sending shutdown to worker {}'.format(worker))
                requests_retry_session(RETRY_NUM).get(worker + '/shutdown')
            except Exception as e:
                log.error('unable to stop worker {}, error message is {}'.format(
                    worker, e.message))
        master.server.shutdown()
        master.server.socket.close()
        log.info('master server shutdown, beginning aggregation')

        # Start the reducing phase.
        merged, to_expands, total_number = reducer.start(results, start_with)
        put_to_tsdb(start_with, merged, to_expands)
        log.info('one round of master aggregation finished, '
                 'to_expands are {}'.format(to_expands))
        expand(to_expands, tmp_dir,
               '{}/{}'.format(tmp_dir, all_metrics_file_name), total_number)

    log.info('finished! total running time is {}'.format(
        time.time() - start_time))

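# The master shuts workers down via `requests_retry_session(RETRY_NUM)`.
# A minimal sketch of that helper, assuming it follows the common
# requests/urllib3 retry recipe (the backoff factor and status list here
# are made-up illustration values; the real implementation may differ):
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def requests_retry_session(retries, session=None):
    session = session or requests.Session()
    retry = Retry(total=retries, read=retries, connect=retries,
                  backoff_factor=0.3, status_forcelist=(500, 502, 504))
    adapter = HTTPAdapter(max_retries=retry)
    # Use the retrying adapter for both plain and TLS connections.
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session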