def setUp(self):
    """Build the mem_static and mem_alloc test binaries."""
    build_plan = (
        ("mem_static", config.CODE_MEM_STATIC),
        ("mem_alloc", config.CODE_MEM_ALLOC),
    )
    self.task = [config.build(name, code) for name, code in build_plan]
    for binary in self.task:
        self.assertTrue(binary is not None)
def setUp(self):
    """Build the int80_exit1 and int80_fork test binaries."""
    self.task = [
        config.build("int80_exit1", config.CODE_INT80_EXIT1),
        config.build("int80_fork", config.CODE_INT80_FORK),
    ]
    for binary in self.task:
        self.assertTrue(binary is not None)
def setUp(self):
    """Build the exec and hello-world test binaries."""
    self.task = [
        config.build("exec", config.CODE_EXEC),
        config.build("hello", config.CODE_HELLO_WORLD),
    ]
    for binary in self.task:
        self.assertTrue(binary is not None)
def setUp(self):
    """Prepare three file_write test binaries, each targeting a different
    output path, plus the output files themselves.

    The inline `# 0/1/2:` labels give the outcome each variant is expected
    to produce at test time (EACCES for 0 and 1, success for 2) —
    presumably enforced by the jail under test; confirm against the tests.
    """
    self.fout = []
    self.task = []
    self.secret = b"Magic World!"
    # 0: EACCES — file exists but is mode 0600, target path is absolute
    # (fout[-1] is the full path returned by config.touch).
    self.fout.append(config.touch("file_write_0.out", self.secret, 0o600))
    self.task.append(config.build("file_write_0", config.TMPL_FILE_WRITE.replace(
        b"@FN@", ("\"%s\"" % self.fout[-1]).encode())))
    # allow everyone to read from jail
    # (prefix is the directory holding the freshly built binary; the
    # cleanup restores the restrictive mode after the test.)
    self.prefix = os.path.abspath(os.path.normpath(os.path.dirname(self.task[-1])))
    os.chmod(self.prefix, 0o755)
    self.addCleanup(os.chmod, self.prefix, 0o700)
    # 1: EACCES — world-writable file, but the path escapes upward ("../").
    fn_rel = "../file_write_1.out"
    fn_full = config.touch(fn_rel, self.secret, 0o666)
    self.assertTrue(fn_full is not None)
    # This file lives outside the per-test directory, so remove it explicitly.
    self.addCleanup(os.unlink, fn_full)
    self.fout.append(fn_rel)
    self.task.append(config.build("file_write_1", config.TMPL_FILE_WRITE.replace(
        b"@FN@", ("\"%s\"" % self.fout[-1]).encode())))
    # 2: OK — world-writable file addressed by a plain relative path.
    fn_rel = "./file_write_2.out"
    fn_full = config.touch(fn_rel, self.secret, 0o666)
    self.assertTrue(fn_full is not None)
    self.fout.append(fn_rel)
    self.task.append(config.build("file_write_2", config.TMPL_FILE_WRITE.replace(
        b"@FN@", ("\"%s\"" % self.fout[-1]).encode())))
    # Every touch/build must have succeeded (None signals failure).
    for f in self.fout + self.task:
        self.assertTrue(f is not None)
    pass
def setUp(self):
    """Build process-spawning binaries: fork, vfork, and a pthread clone."""
    self.task = [
        config.build("fork", config.CODE_FORK),
        config.build("vfork", config.CODE_VFORK),
        config.build("clone", config.CODE_PTHREAD, LDFLAGS="-pthread"),
    ]
    for binary in self.task:
        self.assertTrue(binary is not None)
def setUp(self):
    """Build binaries that die abnormally: one SIGKILLs its parent,
    one raises SIGABRT."""
    kill_ppid_src = config.TMPL_KILL_PPID.replace(b"@SIG@", b"SIGKILL")
    self.task = [
        config.build("kill_ppid", kill_ppid_src),
        config.build("assert", config.CODE_SIGABRT),
    ]
    for binary in self.task:
        self.assertTrue(binary is not None)
def setUp(self):
    """Build binaries exhibiting different wait states: busy loop,
    blocking stdin read, pause, and sleep."""
    build_plan = (
        ("busy_loop", config.CODE_LOOP),
        ("blocking_io", config.CODE_A_PLUS_B),
        ("pause", config.CODE_PAUSE),
        ("sleep", config.CODE_SLEEP),
    )
    self.task = [config.build(name, code) for name, code in build_plan]
    for binary in self.task:
        self.assertTrue(binary is not None)
def setUp(self):
    """Build process-creation test binaries (fork, vfork, pthread clone)."""
    build_plan = [
        ("fork", config.CODE_FORK, {}),
        ("vfork", config.CODE_VFORK, {}),
        ("clone", config.CODE_PTHREAD, {"LDFLAGS": "-pthread"}),
    ]
    self.task = [
        config.build(name, code, **extra) for name, code, extra in build_plan
    ]
    for binary in self.task:
        self.assertTrue(binary is not None)
def setUp(self):
    """Build simple binaries with known output/exit behavior."""
    build_plan = (
        ("hello", config.CODE_HELLO_WORLD),
        ("a_plus_b", config.CODE_A_PLUS_B),
        ("exit1", config.CODE_EXIT1),
        ("exit_group1", config.CODE_EXIT_GROUP1),
    )
    self.task = [config.build(name, code) for name, code in build_plan]
    for binary in self.task:
        self.assertTrue(binary is not None)
def setUp(self):
    """Create output files and build the binaries that write to them."""
    self.fout = [config.touch("file_write.out")]
    # The template embeds the just-touched output path into the C source.
    self.task = [config.build("file_write", config.TMPL_FILE_WRITE.replace(
        b"@FN@", ("\"%s\"" % self.fout[-1]).encode()))]
    self.fout.append(config.touch("loop_print.out"))
    self.task.append(config.build("loop_print", config.CODE_LOOP_PRINT))
    for artifact in self.fout + self.task:
        self.assertTrue(artifact is not None)
def main():
    """Evaluate DiveModel checkpoints on the validation set.

    For each epoch listed in ``opt.which_epochs``, loads the matching
    checkpoint from ``opt.ckpt_path`` (``-1`` selects the latest one found
    on disk), runs ``evaluate`` over the validation loader, and logs every
    metric in the results dict.
    """
    opt, logger, vis = cfg.build(is_train=False)
    dloader = data.get_data_loader(opt)
    print('Val dataset: {}'.format(len(dloader.dataset)))

    model = DiveModel(opt)
    model.setup_training()
    model.initialize_weights()

    for epoch in opt.which_epochs:
        # Load checkpoint
        if epoch == -1:
            # Find the latest checkpoint by the epoch number encoded in
            # the filename ('net*_<epoch>.pth').
            checkpoints = glob.glob(os.path.join(opt.ckpt_path, 'net*.pth'))
            # An `assert` would be stripped under `python -O`; fail loudly
            # with a real exception instead.
            if not checkpoints:
                raise FileNotFoundError(
                    'No checkpoints found in {}'.format(opt.ckpt_path))
            epochs = [
                int(filename.split('_')[-1].split('.')[0])
                for filename in checkpoints
            ]
            epoch = max(epochs)
        logger.print('Loading checkpoints from {}, epoch {}'.format(
            opt.ckpt_path, epoch))
        model.load(opt.ckpt_path, epoch)

        results = evaluate(opt, dloader, model)
        for metric in results:
            logger.print('{}: {}'.format(metric, results[metric]))
def test_toolchain(self):
    """Smoke-test the build toolchain: compile hello-world and run it.

    Fixes over the original: the Popen handle is now closed via a context
    manager, and the unused ``stderr`` local (always None, since stderr
    was never piped) is dropped.
    """
    from subprocess import Popen, PIPE
    cmd = config.build("hello", config.CODE_HELLO_WORLD)
    self.assertTrue(cmd is not None)
    # The context manager guarantees the pipe fds are released even if
    # the assertion below fails.
    with Popen(cmd, close_fds=True, stdout=PIPE) as p:
        stdout, _ = p.communicate()
    self.assertEqual(stdout, b"Hello World!\n")
def setUp(self):
    """Build file-open test binaries: a valid path, NULL, and a bogus
    pointer substituted into the read template."""
    names = ("open", "open_null", "open_bogus")
    pointers = (b"\"/dev/zero\"", b"NULL", b"(char *)0x01")
    self.task = [
        config.build(name, config.TMPL_FILE_READ.replace(b"@FN@", pointer))
        for name, pointer in zip(names, pointers)
    ]
    for binary in self.task:
        self.assertTrue(binary is not None)
def main():
    """Build the model graph and its Adam training op.

    NOTE(review): the original had several defects, fixed here:
    - ``config = config.Config()`` shadowed the imported ``config`` module;
      the instance is now named ``model_config``.
    - ``tf.logging.set_verbosity(".")`` is not a valid verbosity level;
      use ``tf.logging.INFO``.
    - ``config.get_batch(batch_size)`` referenced an undefined name;
      the intended value is ``total_batch_size``.
    - ``tf.train.AdamOptimizer.minimize(loss)`` called ``minimize`` on the
      class itself; an optimizer instance must be constructed first.
    """
    model_config = config.Config()
    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        total_batch_size = FLAGS.total_batch_size
        inputs_dict = model_config.get_batch(total_batch_size)
        learning_rate = model_config.learning_rate
        tf.summary.scalar("learning rate", learning_rate)
        outputs_dict = model_config.build(inputs_dict, is_training=True)
        loss = outputs_dict["loss"]
        train = tf.train.AdamOptimizer(learning_rate).minimize(loss)
def setUp(self):
    """Build binaries that exercise fatal signals, rlimit enforcement,
    and two well-behaved controls (busy_loop, a_plus_b)."""
    build_plan = [
        ("sigbus_adraln", config.CODE_SIGBUS_ADRALN,
         {"CFLAGS": config.CFLAGS + " -ansi"}),
        ("sigfpe_intdiv", config.CODE_SIGFPE_INTDIV, {}),
        ("sigsegv_maperr", config.CODE_SIGSEGV_MAPERR, {}),
        ("sigsegv_accerr", config.CODE_SIGSEGV_ACCERR, {}),
        ("busy_loop", config.CODE_LOOP, {}),
        ("rlimit_fsz", config.CODE_SIGXFSZ, {}),
        ("rlimit_cpu", config.CODE_SIGXCPU, {}),
        ("a_plus_b", config.CODE_A_PLUS_B, {}),
    ]
    self.task = [
        config.build(name, code, **extra) for name, code, extra in build_plan
    ]
    for binary in self.task:
        self.assertTrue(binary is not None)
def build(PROBLEM):
    """Build PROBLEM with the shared PATHS configuration, then print the
    current directory's basename as a progress marker."""
    config.build(PROBLEM, PATHS)
    # os.path.basename is portable; splitting on '/' breaks on Windows.
    print(os.path.basename(os.getcwd()))
def build(PROBLEM):
    """Build PROBLEM with the shared PATHS configuration, then print the
    current directory's basename as a progress marker."""
    config.build(PROBLEM, PATHS)
    # os.path.basename is portable; splitting on '/' breaks on Windows.
    # (Commented-out alternative build/copy calls removed as dead code.)
    print(os.path.basename(os.getcwd()))
def main():
    """Train a DiveModel: run the epoch loop, periodically visualize,
    evaluate on the validation set, and save checkpoints.

    NOTE(review): ``input`` shadows the builtin and ``file`` is opened
    without a context manager — left untouched here; worth cleaning up.
    """
    opt, logger, vis = cfg.build(is_train=True, tb_dir='train_log')
    # Training Set
    train_loader = get_data_loader(opt)
    print('Train dataset: {}'.format(len(train_loader.dataset)))
    # Validation set
    val_opt = copy.deepcopy(opt)
    val_opt.is_train = False
    val_loader = get_data_loader(val_opt)
    print('Val dataset: {}'.format(len(val_loader.dataset)))
    # Initialize model
    model = DiveModel(opt)
    model.setup_training()
    model.initialize_weights()
    # Load checkpoints (epoch 0 means "start fresh")
    if opt.load_ckpt_epoch != 0:
        opt.load_ckpt_dir = opt.ckpt_name
        ckpt_dir = os.path.join(opt.ckpt_dir, opt.dset_name, opt.load_ckpt_dir)
        assert os.path.exists(ckpt_dir)
        logger.print('Loading checkpoint from {}'.format(ckpt_dir))
        model.load(ckpt_dir, opt.load_ckpt_epoch)
        # Resume the epoch counter from the loaded checkpoint.
        opt.start_epoch = opt.load_ckpt_epoch
    # Ensure enough epochs to cover the requested number of iterations.
    opt.n_epochs = max(opt.n_epochs, opt.n_iters // len(train_loader))
    logger.print('Total epochs: {}'.format(opt.n_epochs))
    for epoch in range(opt.start_epoch, opt.n_epochs):
        model.setup(is_train=True)
        print('Train epoch', epoch)
        # Hyperparameters may be scheduled per epoch (e.g. annealing).
        hp_dict = model.update_hyperparameters(epoch, opt.n_epochs)
        vis.add_scalar(hp_dict, epoch)
        for step, data in enumerate(train_loader):
            # data is a 4-tuple; only the first two entries feed training.
            input, output, _, _ = data
            _, loss_dict = model.train(*data[:2])
            if step % opt.log_every == 0:
                # Write to tensorboard (global step = epoch * steps + step)
                vis.add_scalar(loss_dict, epoch * len(train_loader) + step)
                # Visualization on the current training batch
                model.test(input, output)
                vis.add_images(model.get_visuals(),
                               epoch * len(train_loader) + step,
                               prefix='train')
                # Random sample test data for a held-out visualization
                input, output, _, _ = val_loader.dataset[np.random.randint(
                    len(val_loader.dataset))]
                # Dataset items are single samples; add a batch dimension.
                input = input.unsqueeze(0)
                output = output.unsqueeze(0)
                model.test(input, output)
                vis.add_images(model.get_visuals(),
                               epoch * len(train_loader) + step,
                               prefix='test')
        logger.print('Epoch {}/{}:'.format(epoch, opt.n_epochs - 1))
        # Evaluate on val set (only when prediction frames are requested)
        if opt.evaluate_every > 0 and (epoch) % opt.evaluate_every == 0 and \
                opt.n_frames_output > 0:
            results = evaluate(val_opt, val_loader, model)
            vis.add_scalar(results, epoch)
            # Dump per-epoch metrics to a file named after the epoch.
            file = open(os.path.join(opt.ckpt_path, str(epoch)), "w+")
            for metric in results.keys():
                logger.print('{}: {}'.format(metric, results[metric]))
                file.write('{}\t{}\n'.format(metric, results[metric]))
            file.close()
        # Save model checkpoints every save_every epochs and at the end.
        if (epoch + 1) % opt.save_every == 0 and epoch > 0 or epoch == opt.n_epochs - 1:
            model.save(opt.ckpt_path, epoch + 1)
# RUN: ./tbin/test_coverage.sh %s %t from libs.test_utils import TimeoutTest from host import Lobby, HostFactory import config conf = config.build() mqtt_factory = HostFactory.mqtt_factory(conf) l = Lobby(conf, mqtt_factory) c = mqtt_factory.new() c.connect() test = TimeoutTest(10.0) def start(topic, payload): """ Handler for start topic messages """ # Check that two players are joined assert len(payload.split(',')) == 3 # Test has passed test.passed() def act(): # Register handler c.sub('start', start) l.await_init_done() # Simulate players joining l.wait(0.1) c.pub('1/join', '1') c.pub('2/join', '1')
# RUN: ./tbin/test_coverage.sh %s %t
# Integration test: after two players join the lobby, a 'start' message
# carrying (presumably) a 3-field payload must arrive within the timeout.
from libs.test_utils import TimeoutTest
from host import Lobby, HostFactory
import config

# Build the configuration ONCE and share it: the original called
# config.build() twice (once for the factory, once for the lobby), which
# is both wasteful and inconsistent with the sibling test that reuses a
# single `conf`.
conf = config.build()
mqtt_factory = HostFactory.mqtt_factory(conf)
l = Lobby(conf, mqtt_factory)
c = mqtt_factory.new()
c.connect()

# Test fails unless test.passed() is called within 30 seconds.
test = TimeoutTest(30.0)


def start(topic, payload):
    """ Handler for start topic messages """
    # Check that two players are joined
    assert len(payload.split(',')) == 3
    # Test has passed
    test.passed()


def act():
    # Register handler
    c.sub('start', start)
    l.await_init_done()
    # Simulate players joining
    l.wait(0.1)
    c.pub('1/join', '1')
    c.pub('2/join', '1')