class SearchEngineTest(unittest.TestCase):
    """Verifies that SearchEngine decodes RFC 2047 headers before indexing."""

    def setUp(self):
        self.tempdir = TempDir()
        self.agent_home = self.tempdir.name

    def tearDown(self):
        self.tempdir.dissolve()

    def test_encoding(self):
        # given: To/Cc carry MIME encoded-words (base64 UTF-8) for the same addresses
        engine = SearchEngine(INDEX_KEY, self.agent_home)
        encoded_addresses = ('=?utf-8?b?IsOEw7zDtiDDlsO8w6QiIDxmb2xrZXJAcGl4ZWxhdGVkLXByb2plY3Qub3Jn?=\n'
                             ' =?utf-8?b?PiwgRsO2bGtlciA8Zm9sa2VyQHBpeGVsYXRlZC1wcm9qZWN0Lm9yZz4=?=')
        headers = {
            'From': '*****@*****.**',
            'To': encoded_addresses,
            'Cc': encoded_addresses,
            'Subject': 'Some test mail',
        }

        # when: the mail is indexed, then searched by the decoded recipient name
        engine.index_mail(LeapMail('mailid', 'INBOX', headers=headers))
        result = engine.search('folker')

        # then: the mail is found
        self.assertEqual((['mailid'], 1), result)
def test_use_build_script_instead_of_docker_file_if_available(self, docker_mock, res_mock, tempDir_mock):
    """If the init-pixelated-docker-context.sh resource exists, it is executed
    to prepare the docker build context instead of the bundled Dockerfile.

    Fix: locals previously named ``file`` and ``input`` shadowed builtins;
    renamed to ``marker_file``/``marker``.
    """
    # given
    provider = DockerProvider(self._adapter, 'leap_provider', self._leap_provider_x509)
    tempBuildDir = TempDir()
    try:
        tempDir_mock.return_value = tempBuildDir
        tempBuildDir_name = tempBuildDir.name
        with NamedTemporaryFile() as marker_file:
            res_mock.resource_exists.return_value = True
            # the fake script records its argument and $PWD so the test can
            # verify it ran inside the temp build directory
            res_mock.resource_string.return_value = '#!/bin/bash\necho %s $PWD > %s' % (marker_file.name, marker_file.name)

            # when
            provider.initialize()

            # then
            res_mock.resource_exists.assert_called_with('pixelated.resources', 'init-pixelated-docker-context.sh')
            res_mock.resource_string.assert_called_with('pixelated.resources', 'init-pixelated-docker-context.sh')
            with open(marker_file.name, "r") as marker:
                data = marker.read().replace('\n', '')
                self.assertEqual('%s %s' % (marker_file.name, os.path.realpath(tempBuildDir_name)), data)
            docker_mock.return_value.build.assert_called_once_with(path=tempBuildDir_name, tag='pixelated:latest', fileobj=None)
    finally:
        tempBuildDir.dissolve()
class SearchEngineTest(unittest.TestCase):
    """Checks that MIME encoded-word headers are searchable once indexed."""

    def setUp(self):
        self.tempdir = TempDir()
        self.agent_home = self.tempdir.name

    def tearDown(self):
        self.tempdir.dissolve()

    def test_encoding(self):
        # given: identical RFC 2047 encoded address lists for To and Cc
        engine = SearchEngine(INDEX_KEY, self.agent_home)
        folker_encoded = ("=?utf-8?b?IsOEw7zDtiDDlsO8w6QiIDxmb2xrZXJAcGl4ZWxhdGVkLXByb2plY3Qub3Jn?=\n"
                          " =?utf-8?b?PiwgRsO2bGtlciA8Zm9sa2VyQHBpeGVsYXRlZC1wcm9qZWN0Lm9yZz4=?=")
        headers = {
            "From": "*****@*****.**",
            "To": folker_encoded,
            "Cc": folker_encoded,
            "Subject": "Some test mail",
        }

        # when
        engine.index_mail(test_helper.pixelated_mail(extra_headers=headers, chash="mailid"))
        result = engine.search("folker")

        # then: searching the decoded name finds the mail
        self.assertEqual((["mailid"], 1), result)
class SearchEngineTest(unittest.TestCase):
    """Ensures the search index decodes RFC 2047 headers so names are searchable."""

    def setUp(self):
        self.tempdir = TempDir()
        self.agent_home = self.tempdir.name

    def tearDown(self):
        self.tempdir.dissolve()

    def test_encoding(self):
        # given
        se = SearchEngine(INDEX_KEY, self.agent_home)
        headers = {
            'From': '*****@*****.**',
            'To': '=?utf-8?b?IsOEw7zDtiDDlsO8w6QiIDxmb2xrZXJAcGl4ZWxhdGVkLXByb2plY3Qub3Jn?=\n =?utf-8?b?PiwgRsO2bGtlciA8Zm9sa2VyQHBpeGVsYXRlZC1wcm9qZWN0Lm9yZz4=?=',
            'Cc': '=?utf-8?b?IsOEw7zDtiDDlsO8w6QiIDxmb2xrZXJAcGl4ZWxhdGVkLXByb2plY3Qub3Jn?=\n =?utf-8?b?PiwgRsO2bGtlciA8Zm9sa2VyQHBpeGVsYXRlZC1wcm9qZWN0Lm9yZz4=?=',
            'Subject': 'Some test mail',
        }

        # when: index the mail, then search by the decoded recipient name
        se.index_mail(LeapMail('mailid', 'INBOX', headers=headers))
        result = se.search('folker')

        # then
        self.assertEqual((['mailid'], 1), result)
def test_use_build_script_instead_of_docker_file_if_available(
        self, docker_mock, res_mock, tempDir_mock):
    """The packaged init script, when present, builds the docker context.

    Fix: the locals ``file`` and ``input`` shadowed Python builtins; they are
    renamed to ``marker_file`` and ``marker``.
    """
    # given
    provider = DockerProvider(self._adapter, 'leap_provider',
                              self._leap_provider_x509)
    tempBuildDir = TempDir()
    try:
        tempDir_mock.return_value = tempBuildDir
        tempBuildDir_name = tempBuildDir.name
        with NamedTemporaryFile() as marker_file:
            res_mock.resource_exists.return_value = True
            # fake script writes "<name> <cwd>" into marker_file
            res_mock.resource_string.return_value = '#!/bin/bash\necho %s $PWD > %s' % (
                marker_file.name, marker_file.name)

            # when
            provider.initialize()

            # then
            res_mock.resource_exists.assert_called_with(
                'pixelated.resources', 'init-pixelated-docker-context.sh')
            res_mock.resource_string.assert_called_with(
                'pixelated.resources', 'init-pixelated-docker-context.sh')
            with open(marker_file.name, "r") as marker:
                data = marker.read().replace('\n', '')
                self.assertEqual(
                    '%s %s' % (marker_file.name,
                               os.path.realpath(tempBuildDir_name)),
                    data)
            docker_mock.return_value.build.assert_called_once_with(
                path=tempBuildDir_name, tag='pixelated:latest', fileobj=None)
    finally:
        tempBuildDir.dissolve()
def setUp(self):
    """Prepare a temp root and a call-recording wrapper around a real adapter."""
    self._provider_hostname = 'example.org'
    self.users = MagicMock(spec=Users)
    self._tmpdir = TempDir()
    self.root_path = self._tmpdir.name
    # wrapping keeps real adapter behaviour while recording every call
    self._adapter = MagicMock(wraps=PixelatedDockerAdapter(self._provider_hostname))
    self._adapter.docker_image_name.return_value = 'pixelated'
    self._leap_provider_x509 = LeapProviderX509Info()
def setUp(self):
    """Create temp paths, a stand-in mailpile binary, and the runner under test."""
    self._tmpdir = TempDir()
    # a NamedTemporaryFile stands in for the mailpile executable path
    self._tmpbin = NamedTemporaryFile()
    self.mailpile_bin = self._tmpbin.name
    self.root_path = self._tmpdir.name
    self.gpg_initializer = MagicMock()
    self._adapter = MailpileAdapter(self.mailpile_bin, None,
                                    gpg_initializer=self.gpg_initializer)
    self.runner = ForkRunner(self.root_path, self._adapter)
def setUp(self):
    """Reset the shared class-level mocks and build a TLSv1-enforcing session."""
    self.mock_provider = RESTfulServerTest.mock_provider
    self.mock_provider.reset_mock()
    self.mock_users.reset_mock()
    self.mock_authenticator.reset_mock()
    # every https request made by the test goes through the TLSv1 adapter
    self.ssl_request = requests.Session()
    self.ssl_request.mount('https://', EnforceTLSv1Adapter())
    self._tmpdir = TempDir()
    self._root_path = self._tmpdir.name
class SearchEngineTest(unittest.TestCase):
    """SearchEngine tests: write-lock usage and encoded-header indexing."""

    def setUp(self):
        self.tempdir = TempDir()
        self.agent_home = self.tempdir.name

    def tearDown(self):
        self.tempdir.dissolve()

    def test_index_mail_secured_by_lock(self):
        # given: an engine whose write lock is replaced by a recording stub
        querier = mock()
        lock_stub = LockStub()
        when(querier).get_index_masterkey().thenReturn(INDEX_KEY)
        # sanity-check the stubbed querier before relying on it
        self.assertEqual(INDEX_KEY, querier.get_index_masterkey())
        engine = SearchEngine(querier, self.agent_home)
        engine._write_lock = lock_stub
        headers = {
            'From': '*****@*****.**',
            'To': '*****@*****.**',
            'Subject': 'Some test mail',
        }

        # when
        engine.index_mail(test_helper.pixelated_mail(extra_headers=headers))

        # then: indexing acquired the lock
        self.assertTrue(lock_stub.called)

    def test_encoding(self):
        # given: To/Cc carry RFC 2047 base64-encoded UTF-8 addresses
        querier = mock()
        when(querier).get_index_masterkey().thenReturn(INDEX_KEY)
        engine = SearchEngine(querier, self.agent_home)
        headers = {
            'From': '*****@*****.**',
            'To': '=?utf-8?b?IsOEw7zDtiDDlsO8w6QiIDxmb2xrZXJAcGl4ZWxhdGVkLXByb2plY3Qub3Jn?=\n =?utf-8?b?PiwgRsO2bGtlciA8Zm9sa2VyQHBpeGVsYXRlZC1wcm9qZWN0Lm9yZz4=?=',
            'Cc': '=?utf-8?b?IsOEw7zDtiDDlsO8w6QiIDxmb2xrZXJAcGl4ZWxhdGVkLXByb2plY3Qub3Jn?=\n =?utf-8?b?PiwgRsO2bGtlciA8Zm9sa2VyQHBpeGVsYXRlZC1wcm9qZWN0Lm9yZz4=?=',
            'Subject': 'Some test mail',
        }

        # when
        engine.index_mail(test_helper.pixelated_mail(extra_headers=headers, chash='mailid'))
        result = engine.search('folker')

        # then
        self.assertEqual((['mailid'], 1), result)
class SearchEngineTest(unittest.TestCase):
    """SearchEngine: locking during indexing, and encoded-header handling."""

    def setUp(self):
        self.tempdir = TempDir()
        self.agent_home = self.tempdir.name

    def tearDown(self):
        self.tempdir.dissolve()

    def _stubbed_querier(self):
        # mockito-stubbed querier whose master key is the test INDEX_KEY
        querier = mock()
        when(querier).get_index_masterkey().thenReturn(INDEX_KEY)
        return querier

    def test_index_mail_secured_by_lock(self):
        # given
        soledad_querier = self._stubbed_querier()
        lock_stub = LockStub()
        # sanity check that the stub answers as configured
        self.assertEqual(INDEX_KEY, soledad_querier.get_index_masterkey())
        se = SearchEngine(soledad_querier, self.agent_home)
        se._write_lock = lock_stub
        headers = {
            'From': '*****@*****.**',
            'To': '*****@*****.**',
            'Subject': 'Some test mail',
        }

        # when
        se.index_mail(test_helper.pixelated_mail(extra_headers=headers))

        # then
        self.assertTrue(lock_stub.called)

    def test_encoding(self):
        # given
        se = SearchEngine(self._stubbed_querier(), self.agent_home)
        headers = {
            'From': '*****@*****.**',
            'To': '=?utf-8?b?IsOEw7zDtiDDlsO8w6QiIDxmb2xrZXJAcGl4ZWxhdGVkLXByb2plY3Qub3Jn?=\n =?utf-8?b?PiwgRsO2bGtlciA8Zm9sa2VyQHBpeGVsYXRlZC1wcm9qZWN0Lm9yZz4=?=',
            'Cc': '=?utf-8?b?IsOEw7zDtiDDlsO8w6QiIDxmb2xrZXJAcGl4ZWxhdGVkLXByb2plY3Qub3Jn?=\n =?utf-8?b?PiwgRsO2bGtlciA8Zm9sa2VyQHBpeGVsYXRlZC1wcm9qZWN0Lm9yZz4=?=',
            'Subject': 'Some test mail',
        }

        # when
        se.index_mail(test_helper.pixelated_mail(extra_headers=headers, chash='mailid'))
        result = se.search('folker')

        # then
        self.assertEqual((['mailid'], 1), result)
def testStdRun(self):
    """Broad BPI followed by standardization yields the expected raster stats."""
    with TempDir() as d:
        in_raster = os.path.join(d, 'test_bpi.tif')
        std_raster = os.path.join(d, 'test_std_bpi.tif')
        # was encountering this: ERROR 000875: Output raster:
        # c:\Users\shau7031\AppData\Local\Temp\tmp8co8nk\FocalSt_bath1's
        # workspace is an invalid output workspace. Force the workspace to temp:
        arcpy.env.scratchWorkspace = d

        bpi.main(bathy=config.bathy_raster, inner_radius=10, outer_radius=30,
                 out_raster=in_raster, bpi_type='broad')
        self.assertTrue(os.path.exists(in_raster))

        standardize_bpi_grids.main(bpi_raster=in_raster, out_raster=std_raster)
        self.assertTrue(os.path.exists(std_raster))

        self.assertAlmostEqual(su.raster_properties(std_raster, "MEAN"),
                               0.671608391608)
        self.assertAlmostEqual(su.raster_properties(std_raster, "STD"),
                               99.655593923183)
def setUp(self):
    """Temp root plus a call-recording wrapper around a real docker adapter."""
    self.users = MagicMock(spec=Users)
    self._tmpdir = TempDir()
    self.root_path = self._tmpdir.name
    # wraps= keeps real adapter behaviour while letting assertions see calls
    self._adapter = MagicMock(wraps=PixelatedDockerAdapter())
    self._adapter.docker_image_name.return_value = 'pixelated'
    self._leap_provider_x509 = LeapProviderX509Info()
def build_pdf(self, source, texinputs=None):
    """Compile *source* with xelatex and return the resulting PDF bytes.

    :param source: latex source object providing ``temp_saved``.
    :param texinputs: optional list of extra TEXINPUTS directories. The
        caller's list is no longer mutated (the previous ``texinputs=[]``
        mutable default accumulated entries across calls).
    :raises LatexBuildError: when xelatex exits non-zero; wraps the log file.
    """
    # copy to avoid mutating the caller's list / sharing a default list
    texinputs = list(texinputs) if texinputs else []
    texinputs.append(
        bytes.decode(subprocess.check_output(["which", "xelatex"])).strip())

    with TempDir() as tmpdir, source.temp_saved(suffix=".latex", dir=tmpdir) as tmp:
        # close temp file, so other processes can access it also on Windows
        tmp.close()
        base_fn = os.path.splitext(tmp.name)[0]
        output_fn = base_fn + ".pdf"
        args = [self.xelatex, tmp.name]

        # create environment
        newenv = os.environ.copy()
        newenv["TEXINPUTS"] = os.pathsep.join(texinputs) + os.pathsep

        try:
            # use context managers so the devnull handles are not leaked
            with open(os.devnull, "r") as null_in, open(os.devnull, "w") as null_out:
                subprocess.check_call(
                    args,
                    cwd=tmpdir,
                    env=newenv,
                    stdin=null_in,
                    stdout=null_out,
                    stderr=null_out,
                )
        except CalledProcessError as e:
            raise_from(LatexBuildError(base_fn + ".log"), e)

        with open(output_fn, "rb") as pdf_file:
            return I(pdf_file.read(), encoding=None)
def test_sample_loaded_memories(self):
    """Memories persisted by one instance can be sampled after reloading."""
    with TempDir() as temp_directory:
        memories_path = os.path.join(temp_directory, 'replay_memories.dat')

        writer = ReplayMemories(memories_path, max_current_memories_in_ram=100)
        memories = [self._create_replay_memory(action_index=index)
                    for index in (11, 22, 32, 42, 52, 62, 72)]
        for memory in memories:
            writer.append(memory)
        writer.save()

        reader = ReplayMemories(memories_path, max_current_memories_in_ram=100)
        sampled = reader.sample(5, seed=3)

        # seed=3 makes the sample deterministic: memories 2, 4, 7, 3 and 5
        expected = [memories[1], memories[3], memories[6], memories[2], memories[4]]
        self.assertItemsEqual(sampled, expected)
def test_get_filename_nonexistant(self, store, key):
    """get_file() for a missing key raises KeyError."""
    # NOTE: boto misbehaves here and tries to erase the target file;
    # the parent tests use /dev/null, which must never be os.remove'd,
    # so point the output at a path inside a throwaway directory instead.
    with TempDir() as tmpdir:
        target = os.path.join(tmpdir, 'a')
        with pytest.raises(KeyError):
            store.get_file(key, target)
def test_persist_and_retrieve(self):
    """Bundles written by one Metrics instance are readable by a fresh one."""
    first = self._create_metrics_bundle(episode_number=31, average_delta_score=33.0,
                                        average_speed=20.0, average_action_value=49.0,
                                        average_loss=63.0, final_score=888.0,
                                        execution_time=114.0)
    second = self._create_metrics_bundle(episode_number=32, average_delta_score=123.0,
                                         average_speed=3.55, average_action_value=312.1,
                                         average_loss=11.0, final_score=1002.0,
                                         execution_time=114.0)
    with TempDir() as temp_directory:
        metrics_path = os.path.join(temp_directory, 'metrics.dat')

        writer = Metrics(metrics_path=metrics_path, bundler=MetricsInTrainBundle)
        for bundle in (first, second):
            writer.append(bundle)
        writer.persist_and_flush_memory()

        reader = Metrics(metrics_path=metrics_path, bundler=MetricsInTrainBundle)
        self.assertSequenceEqual(reader.all_metric_bundles(), [first, second])
def test_sample_recent_memories_after_appending_to_loaded_memories(self):
    """Recent-span sampling covers persisted memories plus fresh appends."""
    with TempDir() as temp_directory:
        memories_path = os.path.join(temp_directory, 'replay_memories.dat')
        memories = [self._create_replay_memory(action_index=index)
                    for index in (11, 22, 32, 42, 52, 62, 72)]

        # persist the first three memories
        writer = ReplayMemories(memories_path, max_current_memories_in_ram=100)
        for memory in memories[:3]:
            writer.append(memory)
        writer.save()

        # reload and append the remaining four
        reader = ReplayMemories(memories_path, max_current_memories_in_ram=100)
        for memory in memories[3:]:
            reader.append(memory)

        sampled = reader.sample(5, recent_memories_span=5)

        # the 5 most recent memories straddle the persisted/live boundary
        for memory in memories[2:]:
            self.assertTrue(memory in sampled)
def test_retrieval_single_item(self):
    """fetch() with one index returns exactly that stored item."""
    with TempDir() as temp_directory:
        archive_path = os.path.join(temp_directory, 'temp_file.name')
        archive = IndexedItemsArchive(archive_path)
        archive.append_and_save(["item0", "item1"])
        self.assertEqual(archive.fetch([0])[0], "item0")
        self.assertEqual(archive.fetch([1])[0], "item1")
def test_sample_recent_memories(self):
    """Sampling with recent_memories_span=2 returns the two newest memories.

    Fix: removed a leftover debug ``print`` of the sample.
    """
    with TempDir() as temp_directory:
        memories_path = os.path.join(temp_directory, 'replay_memories.dat')
        replay_memories = ReplayMemories(memories_path, max_current_memories_in_ram=100)
        memories = [self._create_replay_memory(action_index=index)
                    for index in (11, 22, 32, 42, 52, 62, 72)]
        for memory in memories:
            replay_memories.append(memory)

        sampled = replay_memories.sample(2, recent_memories_span=2)

        # the span restricts the sample to the last two appended memories
        self.assertTrue(memories[5] in sampled)
        self.assertTrue(memories[6] in sampled)
def test_store():
    """Distinct items get distinct hashes and round-trip through the store."""
    with TempDir() as tmp:
        store = FileStore(dir=tmp)

        empty = Item()
        empty_hash = empty.hash
        store.put(empty)

        foo = Item(text="Foo Value")
        foo_hash = foo.hash
        store.put(foo)

        bar = Item(text="Bar Value")
        bar_hash = bar.hash
        store.put(bar)

        # each item is retrievable by its own hash with content intact
        assert store.item(empty_hash).hash == empty_hash

        fetched_foo = store.item(foo_hash)
        assert fetched_foo.hash == foo_hash
        assert fetched_foo.text == "Foo Value"

        fetched_bar = store.item(bar_hash)
        assert fetched_bar.hash == bar_hash
        assert fetched_bar.text == "Bar Value"
def test_idempotent_put():
    """Storing the same item repeatedly must not fail."""
    with TempDir() as tmp:
        store = FileStore(dir=tmp)
        item = Item(text="Idempotent?")
        for _ in range(3):
            store.put(item)
def unpack_msg(input_file, output_dir, cfg): target_exts = cfg.targets with open(input_file, "rb") as fp: msg = email.message_from_file(fp) only_input_filename = os.path.split(input_file)[1] counter = 0 for part in msg.walk(): # multipart/* are just containers try: if part.get_content_maintype() == 'multipart': continue # Applications should really sanitize the given filename so that an # email message can't be used to overwrite important files m_filename = part.get_filename() filename = m_filename if filename: if filename.startswith("=?"): decoded = decode_header(filename) filename = decoded[0][0].decode(decoded[0][1].upper()) else: filename = escape_chars(filename) filename = only_input_filename.decode("utf-8") + "_" + filename ext = os.path.splitext(filename)[1].lower() else: ext = mimetypes.guess_extension(part.get_content_type()) if not ext: # Use a generic bag-of-bits extension ext = '.bin' filename = u'%s_part-%03d%s' % (only_input_filename, counter, ext) filename = filename.encode(fs_enc) if ext in cfg.targets: with open(os.path.join(output_dir, filename), 'wb') as of: of.write(part.get_payload(decode=True)) elif ext in arch_exts and not fnmatch(filename, cfg.exclude): with TempDir(dir=cfg.tempdir) as temp: archpath = os.path.join(temp, filename) with open(archpath, 'wb') as of: of.write(part.get_payload(decode=True)) for f in unpack_arch(archpath, temp, cfg): ext = os.path.splitext(f)[1].lower() if ext in cfg.targets and not fnmatch(f, cfg.exclude): path_from = os.path.join(temp, f) path_to = os.path.join(output_dir, filename + '_' + f) shutil.copy(path_from, path_to) counter += 1 except UnicodeDecodeError as e: print "oops:" print input_file raise print "encoded: ", type(m_filename), m_filename.encode( "string_escape") if m_filename and m_filename.startswith("=?"): decoded = decode_header(m_filename) print "tuple: ", type(decoded), decoded if cfg.remove == True: os.remove(input_file)
def test_purge_older_items_min_recent_items_to_keep_greater_then_to_len(self):
    """Nothing is purged when the keep-count exceeds the number of items."""
    with TempDir() as temp_directory:
        archive_path = os.path.join(temp_directory, 'temp_file.name')
        archive = IndexedItemsArchive(archive_path, max_items_per_file=2)
        archive.append_and_save(["item0", "item1"], purge_min_recent_items_to_keep=3)
        self.assertEqual(list(archive), ["item0", "item1"])
def test_len_complex_items_on_new_session(self):
    """len() is restored from disk for numpy-array items."""
    with TempDir() as temp_directory:
        archive_path = os.path.join(temp_directory, 'temp_file.name')
        writer = IndexedItemsArchive(archive_path)
        writer.append_and_save([np.array([0, 1, 2]),
                                np.array([3, 4, 5]),
                                np.array([6, 7, 8])])
        reader = IndexedItemsArchive(archive_path)
        self.assertEqual(len(reader), 3)
def test_retrieval_complex_items_on_new_session(self):
    """Numpy-array items round-trip through disk and fetch by index."""
    with TempDir() as temp_directory:
        archive_path = os.path.join(temp_directory, 'temp_file.name')
        writer = IndexedItemsArchive(archive_path)
        writer.append_and_save([np.array([0, 1, 2]),
                                np.array([3, 4, 5]),
                                np.array([6, 7, 8])])
        reader = IndexedItemsArchive(archive_path)
        np.testing.assert_array_equal(reader.fetch([0, 2]),
                                      [np.array([0, 1, 2]), np.array([6, 7, 8])])
def test_len_on_new_session(self):
    """len() reflects the persisted item count when the archive is reopened."""
    with TempDir() as temp_directory:
        archive_path = os.path.join(temp_directory, 'temp_file.name')
        writer = IndexedItemsArchive(archive_path)
        writer.append_and_save(["item0", "item1", "item2", "item3"])
        reader = IndexedItemsArchive(archive_path)
        self.assertEqual(len(reader), 4)
def test_retrieval_multiple_items_on_new_session_with_non_sequential_order(self):
    """fetch() with shuffled indices after reopening the archive.

    NOTE(review): the expected value implies fetch() returns items in storage
    order regardless of the requested index order — confirm against the
    IndexedItemsArchive API.
    """
    with TempDir() as temp_directory:
        archive_path = os.path.join(temp_directory, 'temp_file.name')
        writer = IndexedItemsArchive(archive_path)
        writer.append_and_save(["item0", "item1", "item2", "item3"])
        reader = IndexedItemsArchive(archive_path)
        self.assertEqual(reader.fetch([2, 0, 1, 3]),
                         ["item0", "item1", "item2", "item3"])
def test_get_into_file(self, store, key, value):
    """get_file() writes the stored value to the given path.

    Fix: the output file handle was opened without being closed; it is now
    managed by a ``with`` block.
    """
    with TempDir() as tmpdir:
        store.put(key, value)
        out_filename = os.path.join(tmpdir, 'output')
        store.get_file(key, out_filename)
        with open(out_filename, 'rb') as result:
            assert result.read() == value
def __enter__(self):
    """Spawn self.spawn_cmd in a new tmux pane, teeing output to a fifo and log.

    NOTE(review): references a module-level name ``pane`` for the target tmux
    pane — confirm it is defined in the enclosing module.
    """
    self.tempdir_obj = TempDir()
    tempdir = self.tempdir_obj.__enter__()
    logfile = join(tempdir, self.description + "out")
    os.mkfifo(logfile)
    # pipeline: command output -> fifo (read by Expect) and a persistent log file
    bash_command = "{} 2>&1 | tee -i --output-error=warn {} | tee -i --output-error=warn {}_log.txt".format(
        self.spawn_cmd, pipes.quote(logfile), self.description)
    tmux_args = ['tmux', 'split-window', '-hdP', '-F', '#{pane_id}', '-t', pane,
                 'bash', '-c', bash_command]
    self.pane = check_output(tmux_args).strip()
    # non-blocking read end of the fifo feeds the Expect helper
    fifo_fd = os.open(logfile, os.O_RDONLY | os.O_NONBLOCK)
    self.proc = Expect(fifo_fd, quiet=True)
    self.proc.expect("(rr) ")
    return self
def setUp(self):
    """Create a keyfile-protected database inside a throwaway directory."""
    self.d = TempDir()
    self.keyfile_path = os.path.join(self.d.name, 'keyfile')
    self.database_path = os.path.join(self.d.name, 'database')
    # create the keyfile first, then the database protected by it
    self.keyfile = K.create(self.keyfile_path, self.database_path)
    D.create(self.keyfile)
def create_recording(qemu_path, qcow, snapshot, command, copy_directory,
                     recording_path, expect_prompt, cdrom, isoname=None,
                     rr=False, savevm=False, perf=False, env=None,
                     extra_args=None, stdin=False):
    """Boot a guest, ship files in via ISO, run *command*, record with PANDA.

    :param command: argv list to run in the guest; with ``stdin=True`` it must
        be ``[binary, stdin_file]``. The caller's list is no longer mutated.
    :param env: optional mapping of environment variables for the guest
        command (fix: was a shared mutable default ``{}``).
    :param isoname: ISO path; defaults to ``copy_directory + '.iso'``.
    """
    assert not (rr and perf)
    if env is None:
        env = {}
    recording_path = realpath(recording_path)
    if not isoname:
        isoname = copy_directory + '.iso'

    with TempDir() as tempdir, \
            Qemu(qemu_path, qcow, snapshot, tempdir, rr=rr, perf=perf,
                 expect_prompt=expect_prompt, extra_args=extra_args) as qemu:
        if os.listdir(copy_directory):
            progress("Creating ISO {}...".format(isoname))
            make_iso(copy_directory, isoname)

            progress("Inserting CD...")
            qemu.run_monitor("change {} \"{}\"".format(cdrom, isoname))
            qemu.run_console("mkdir -p {}".format(pipes.quote(copy_directory)))
            # Make sure cdrom didn't automount
            # Make sure guest path mirrors host path
            qemu.run_console("while ! mount /dev/cdrom {}; ".format(
                pipes.quote(copy_directory)) +
                "do sleep 0.3; umount /dev/cdrom; done")
            # if there is a setup.sh script in the replay/proc_name/cdrom/ folder
            # then run that setup.sh script first (good for scriptst that need to
            # prep guest environment before script runs
            qemu.run_console("{}/setup.sh &> /dev/null || true".format(
                pipes.quote(copy_directory)))

        # Important that we type command into console before recording starts and only
        # hit enter once we've started the recording.
        progress("Running command inside guest.")
        if stdin:
            # only support for "[binary] [STDIN_file]"
            assert (len(command) == 2)
            # build a new list rather than insert()-ing into the caller's list
            command = command[:1] + ["<"] + command[1:]
        qemu.type_console(sp.list2cmdline(env_to_list(env) + command))

        # start PANDA recording
        qemu.run_monitor("begin_record \"{}\"".format(recording_path))
        qemu.run_console(timeout=1200)

        # end PANDA recording
        progress("Ending recording...")
        qemu.run_monitor("end_record")
def test_purge_older_items_does_not_purge_when_items_stored_single_file(self):
    """Purging removes nothing while all items live in a single file."""
    with TempDir() as temp_directory:
        archive_path = os.path.join(temp_directory, 'temp_file.name')
        archive = IndexedItemsArchive(archive_path)
        all_items = ["item%d" % index for index in range(8)]
        archive.append_and_save(list(all_items), purge_min_recent_items_to_keep=2)
        self.assertEqual(list(archive), all_items)
def testSlopeRun(self):
    """slope.main produces a raster with the expected mean slope."""
    with TempDir() as workspace:
        slope_raster = os.path.join(workspace, 'test_slope.tif')
        # force the scratch workspace into temp to avoid invalid-workspace errors
        arcpy.env.scratchWorkspace = workspace
        slope.main(bathy=config.bathy_raster, out_raster=slope_raster)
        self.assertTrue(os.path.exists(slope_raster))
        self.assertAlmostEqual(su.raster_properties(slope_raster, "MEAN"),
                               3.802105241105673)
def test_purge_older_items_and_reload(self):
    """Purged items stay gone after reopening the archive."""
    with TempDir() as temp_directory:
        archive_path = os.path.join(temp_directory, 'temp_file.name')
        writer = IndexedItemsArchive(archive_path, max_items_per_file=3)
        writer.append_and_save(["item0", "item1", "item2", "item3", "item4",
                                "item5", "item6", "item7"],
                               purge_min_recent_items_to_keep=4)
        reader = IndexedItemsArchive(archive_path)
        # the first full file was purged; the most recent five items remain
        self.assertEqual(list(reader),
                         ["item3", "item4", "item5", "item6", "item7"])
def test_items_in_multiple_files_tolerate_different_max_items_in_between_sessions(self):
    """Reopening with a different max_items_per_file still reads older files."""
    with TempDir() as temp_directory:
        archive_path = os.path.join(temp_directory, 'temp_file.name')

        first_session = IndexedItemsArchive(archive_path, max_items_per_file=2)
        first_session.append_and_save(["item0", "item1", "item2", "item3"])

        second_session = IndexedItemsArchive(archive_path, max_items_per_file=3)
        second_session.append_and_save(["item4", "item5"])

        self.assertEqual(list(second_session),
                         ["item0", "item1", "item2", "item3", "item4", "item5"])
def test_simple_store():
    """A single item round-trips through the store by its hash."""
    with TempDir() as tmp:
        store = FileStore(dir=tmp)
        original = Item(text="This is a test")
        store.put(original)
        retrieved = store.item(original.hash)
        assert original.hash == retrieved.hash
        assert original.text == retrieved.text
def __enter__(self):
    """Start self.spawn_cmd in a side tmux pane; mirror its output to fifo + log.

    NOTE(review): uses a module-level name ``pane`` as the tmux target —
    confirm it exists in the enclosing module.
    """
    self.tempdir_obj = TempDir()
    tempdir = self.tempdir_obj.__enter__()
    logfile = join(tempdir, self.description + "out")
    os.mkfifo(logfile)
    # tee once into the fifo (consumed by Expect) and once into a log file
    bash_command = "{} 2>&1 | tee -i --output-error=warn {} | tee -i --output-error=warn {}_log.txt".format(
        self.spawn_cmd, pipes.quote(logfile), self.description)
    self.pane = check_output(
        ['tmux', 'split-window', '-hdP', '-F', '#{pane_id}', '-t', pane,
         'bash', '-c', bash_command]).strip()
    self.proc = Expect(os.open(logfile, os.O_RDONLY | os.O_NONBLOCK),
                       quiet=True)
    self.proc.expect("(rr) ")
    return self
class ForkProviderTest(unittest.TestCase):
    """End-to-end tests for ForkProvider against a mocked Adapter runner."""

    def setUp(self):
        self._tmpdir = TempDir()
        self.root_path = self._tmpdir.name
        self.runner = MagicMock(spec=Adapter)
        self.provider = ForkProvider(self.root_path, self.runner)
        self.provider.initialize()

    def tearDown(self):
        self._tmpdir.dissolve()

    @patch('random.getrandbits')
    def test_new_instance_can_be_added(self, randmock):
        hashed_password = '******'
        hex_salt = 31
        randmock.return_value = 1

        self.provider.add('test', 'password')

        instance_path = join(self.root_path, 'test')
        cfg_path = join(instance_path, 'agent.cfg')
        data_path = join(instance_path, 'data')
        gnupg_path = join(instance_path, 'gnupg')
        self.assertTrue(isfile(cfg_path), 'No config file created')
        self.assertTrue(isdir(instance_path), 'No folder for user has been created')
        self.assertTrue(isdir(data_path), 'No maipile folder for user has been created')
        self.assertTrue(isdir(gnupg_path), 'No gnupg folder for user has been created')
        self.assert_config_file(cfg_path, 'test', hashed_password, hex_salt)
        # all created directories are private to the owner (0700)
        self.assertEqual(stat.S_IFDIR | stat.S_IRWXU, os.stat(instance_path).st_mode)
        self.assertEqual(stat.S_IFDIR | stat.S_IRWXU, os.stat(data_path).st_mode)
        self.assertEqual(stat.S_IFDIR | stat.S_IRWXU, os.stat(gnupg_path).st_mode)

    @patch('random.getrandbits')
    def test_random_salt_is_used(self, randmock):
        hashed_password = '******'
        salt = 2
        hex_salt = 32
        randmock.return_value = salt
        instance_path = join(self.root_path, 'test')
        cfg_path = join(instance_path, 'agent.cfg')

        self.provider.add('test', 'password')

        self.assert_config_file(cfg_path, 'test', hashed_password, hex_salt)

    def test_multiple_instances_can_be_added(self):
        self.provider.add('first', 'password')
        self.provider.add('second', 'password')
        self.assertEqual(['first', 'second'], self.provider.list())

    def test_instances_can_not_be_added_twice(self):
        self.provider.add('test', 'password')
        self.assertRaises(InstanceAlreadyExistsError,
                          self.provider.add, 'test', 'password')

    def test_remove_raises_exception_if_instance_does_not_exist(self):
        self.assertRaises(ValueError, self.provider.remove, 'test')

    def test_instances_can_be_removed(self):
        self.provider.add('test', 'password')
        self.provider.remove('test')

    def test_that_non_existing_instance_cannot_be_started(self):
        self.assertRaises(InstanceNotFoundError, self.provider.start, 'test')

    def test_that_instance_can_be_started_and_gets_initialized(self):
        self.provider.add('test', 'password')
        self.provider.start('test')
        self.runner.initialize.assert_called_with('test')
        self.runner.start.assert_called_with('test')

    def test_that_instance_cannot_be_started_twice(self):
        self.provider.add('test', 'password')
        self.provider.start('test')
        self.assertRaises(InstanceAlreadyRunningError, self.provider.start, 'test')

    def test_that_running_instances_are_in_runnig_list(self):
        self._init_runner_memory_usage()
        for name in ['one', 'two', 'three']:
            self.provider.add(name, 'password')
            self.provider.start(name)
        self.provider.add('not-started', 'password')
        self.assertEqual({'one', 'two', 'three'},
                         set(self.provider.list_running()))

    def test_that_non_existing_instance_cannot_be_stopped(self):
        self.assertRaises(InstanceNotRunningError, self.provider.stop, 'test')

    def test_that_non_started_instance_cannot_be_stopped(self):
        self.provider.add('test', 'password')
        self.assertRaises(InstanceNotRunningError, self.provider.stop, 'test')

    def test_that_running_instance_can_be_stopped(self):
        process = MagicMock(spec=ForkedProcess)
        self.runner.start.return_value = process
        self.provider.add('test', 'password')
        self.provider.start('test')
        self.provider.stop('test')
        process.terminate.assert_called_once_with()

    def test_that_instance_cannot_be_stopped_twice(self):
        self.provider.add('test', 'password')
        self.provider.start('test')
        self.provider.stop('test')
        self.assertRaises(InstanceNotRunningError, self.provider.stop, 'test')

    def test_that_existing_agents_are_autodiscovered(self):
        # pre-create an agent directory, then build a fresh provider over it
        agent = os.path.join(self.root_path, 'test')
        os.mkdir(agent)
        self.provider = ForkProvider(self.root_path, self.runner)
        self.provider.initialize()
        self.assertEqual(['test'], self.provider.list())

    def test_that_status_returns_current_port(self):
        process = MagicMock(spec=ForkedProcess(None, 1234))
        process.port = 1234
        self.runner.start.return_value = process
        self.provider.add('test', 'password')
        self.provider.start('test')

        status = self.provider.status('test')

        self.assertEqual({'port': 1234, 'state': 'running'}, status)

    def test_authenticate(self):
        self.provider.add('test', 'password')
        self.assertTrue(self.provider.authenticate('test', 'password'))
        self.assertFalse(self.provider.authenticate('test', 'something else'))

    def test_unicode_passwords_dont_cause_type_error(self):
        self.provider.add('test', u'password')
        self.assertTrue(self.provider.authenticate('test', u'password'))

    def assert_config_file(self, filename, name, hashed_password, salt):
        # (local renamed from `file` to avoid shadowing the builtin)
        with open(filename, 'r') as config_file:
            content = config_file.read()
            self.assertEqual(
                '[agent]\nname = %s\nhashed_password = %s\nsalt = %s\n\n' % (
                    name, hashed_password, salt),
                content)

    def test_memory_usage_zero_if_no_processes(self):
        self.assertEqual({'total_usage': 0, 'average_usage': 0, 'agents': []},
                         self.provider.memory_usage())

    def test_memory_usage_with_process(self):
        # given
        process = MagicMock(spec=ForkedProcess(None, 1234))
        process.port = 1234
        process.memory_usage.return_value = 1024
        self.runner.start.return_value = process
        self.provider.add('test', 'password')
        self.provider.start('test')

        # when
        usage = self.provider.memory_usage()

        # then
        self.assertEqual({'total_usage': 1024, 'average_usage': 1024,
                          'agents': [{'name': 'test', 'memory_usage': 1024}]},
                         usage)

    @patch('pixelated.provider.fork.psutil.virtual_memory')
    def test_that_instance_cannot_be_started_with_too_little_memory_left(self, vm_mock):
        # given: a process that would need one byte more than is free
        svmem = namedtuple('svmem', ['free'])
        free_memory = 1024 * 1024
        vm_mock.return_value = svmem(free_memory)
        process = MagicMock(spec=ForkedProcess)
        process.memory_usage.return_value = free_memory + 1
        self.runner.start.return_value = process
        self.provider.add('memory monster', 'password')
        self.provider.start('memory monster')
        self.provider.add('second', 'password')

        # when/then
        self.assertRaises(NotEnoughFreeMemory, self.provider.start, 'second')
        process.memory_usage.return_value = free_memory - 1
        self.provider.start('second')

    def _init_runner_memory_usage(self):
        # every started process reports a fixed 1024 bytes of memory usage
        def new_process(*args):
            process = MagicMock(spec=ForkedProcess)
            process.memory_usage.return_value = 1024
            return process
        self.runner.start.side_effect = new_process
def setUp(self):
    """Fresh temp root, mocked runner, and an initialized ForkProvider per test."""
    self._tmpdir = TempDir()
    self.root_path = self._tmpdir.name
    self.runner = MagicMock(spec=Adapter)
    self.provider = ForkProvider(self.root_path, self.runner)
    self.provider.initialize()
class DockerProviderTest(unittest.TestCase):
    """Tests for DockerProvider: image build/pull at initialize time, container
    lifecycle (start/stop/status/remove/reset), memory accounting and credential
    hand-off — all against a mocked docker client."""

    def setUp(self):
        self._provider_hostname = 'example.org'
        self.users = MagicMock(spec=Users)
        self._tmpdir = TempDir()
        self.root_path = self._tmpdir.name
        # wraps= keeps the real adapter behaviour while letting us stub single calls
        self._adapter = MagicMock(wraps=PixelatedDockerAdapter(self._provider_hostname))
        self._adapter.docker_image_name.return_value = 'pixelated'
        self._leap_provider_x509 = LeapProviderX509Info()

    def tearDown(self):
        self._tmpdir.dissolve()

    def test_that_docker_api_version_is_pinned_to_v1_14(self):
        self.assertEqual('1.14', DOCKER_API_VERSION)

    @patch('pixelated.provider.docker.docker.Client')
    def test_constructor_expects_docker_url(self, docker_mock):
        DockerProvider(self.root_path, self._adapter, self._leap_provider_x509, 'some docker url')

    @patch('pixelated.provider.docker.docker.Client')
    def test_initialize_builds_docker_image(self, docker_mock):
        # given
        client = docker_mock.return_value
        client.images.return_value = []
        dockerfile = pkg_resources.resource_string('pixelated.resources', 'Dockerfile.pixelated')

        # when
        DockerProvider(self._adapter, 'leap_provider', self._leap_provider_x509, 'some docker url').initialize()

        # then
        docker_mock.assert_called_once_with(base_url="some docker url", version=DOCKER_API_VERSION)
        client.build.assert_called_once_with(path=None, fileobj=StringIOMatcher(dockerfile), tag='pixelated:latest')

    @patch('pixelated.provider.docker.docker.Client')
    def test_initialize_downloads_docker_image_if_image_name_contains_slash(self, docker_mock):
        # given
        client = docker_mock.return_value
        client.images.return_value = []
        self._adapter.docker_image_name.return_value = 'pixelated/pixelated-user-agent'

        # when
        DockerProvider(self._adapter, 'leap_provider', self._leap_provider_x509, 'some docker url').initialize()

        # then
        docker_mock.assert_called_once_with(base_url='some docker url', version=DOCKER_API_VERSION)
        client.pull.assert_called_with(tag='latest', repository='pixelated/pixelated-user-agent', stream=True)

    @patch('pixelated.provider.docker.docker.Client')
    def test_initialize_downloads_and_starts_logger_docker_image_if_not_yet_available(self, docker_mock):
        # given
        client = docker_mock.return_value
        client.images.return_value = []
        container = {'Id': 'some id'}
        client.create_container.return_value = container
        expected_syslog_tag = '.user_agent'

        # when
        DockerProvider(self._adapter, 'leap_provider', self._leap_provider_x509, 'some docker url').initialize()

        # then
        docker_mock.assert_called_once_with(base_url='some docker url', version=DOCKER_API_VERSION)
        client.pull.assert_called_with(tag='latest', repository='pixelated/logspout', stream=True)
        # NOTE(review): the syslog endpoint literal below was redacted in the reviewed
        # copy of this file; only the tag suffix is recoverable from the test setup.
        # Confirm host/port (and the start assertion) against DockerProvider.initialize.
        client.create_container.assert_called_once_with(
            image='pixelated/logspout:latest',
            command='syslog://127.0.0.1:514?append_tag=%s' % expected_syslog_tag)
        client.start.assert_called_with(container)

    @patch('pixelated.provider.docker.docker.Client')
    def test_initialize_skips_image_build_or_download_if_already_available(self, docker_mock):
        # given
        client = docker_mock.return_value
        client.images.return_value = [
            {'Created': 1404833111,
             'VirtualSize': 297017244,
             'ParentId': '57885511c8444c2b89743bef8b89eccb65f302b2a95daa95dfcc9b972807b6db',
             'RepoTags': ['pixelated:latest'],
             'Id': 'b4f10a2395ab8dfc5e1c0fae26fa56c7f5d2541debe54263105fe5af1d263189',
             'Size': 181956643}]
        provider = DockerProvider(self._adapter, 'leap_provider', self._leap_provider_x509)

        # when
        provider.initialize()

        # then
        self.assertFalse(client.build.called)
        self.assertFalse(provider.initializing)

    @patch('pixelated.provider.docker.docker.Client')
    def test_initialize_doesnt_download_logger_image_if_already_available(self, docker_mock):
        # given
        client = docker_mock.return_value
        client.images.return_value = [
            {'Created': 1404833111,
             'VirtualSize': 297017244,
             'ParentId': '57885511c8444c2b89743bef8b89eccb65f302b2a95daa95dfcc9b972807b6db',
             'RepoTags': ['pixelated/logspout:latest'],
             'Id': 'b4f10a2395ab8dfc5e1c0fae26fa56c7f5d2541debe54263105fe5af1d263189',
             'Size': 181956643}]

        # when
        DockerProvider(self._adapter, 'leap_provider', self._leap_provider_x509, 'some docker url').initialize()

        # then
        docker_mock.assert_called_once_with(base_url='some docker url', version=DOCKER_API_VERSION)
        # BUG FIX: `assert_never_called_with` is not a real Mock assertion — the
        # original call merely created a child mock and always passed silently.
        # Assert explicitly that pull was never invoked.
        self.assertFalse(client.pull.called)

    @patch('pixelated.provider.docker.docker.Client')
    def test_reports_initializing_while_initialize_is_running(self, docker_mock):
        # given
        client = docker_mock.return_value
        client.images.return_value = []

        def build(path, fileobj, tag):
            sleep(0.2)
            return []

        client.build.side_effect = build
        provider = DockerProvider(self._adapter, 'some provider', self._leap_provider_x509, 'some docker url')
        self.assertTrue(provider.initializing)

        # when
        t = Thread(target=provider.initialize)  # move to thread so that initializing behaviour is observable
        t.start()

        # then
        sleep(0.1)
        self.assertTrue(provider.initializing)
        t.join()
        self.assertFalse(provider.initializing)

    @patch('pixelated.provider.docker.docker.Client')
    def test_reports_initializing_while_initialize_is_running_and_image_downloaded(self, docker_mock):
        # given
        client = docker_mock.return_value
        client.images.return_value = []
        self._adapter.docker_image_name.return_value = 'pixelated/pixelated-user-agent'

        def download(repository, tag, stream):
            sleep(0.2)
            return []

        client.pull.side_effect = download
        provider = DockerProvider(self._adapter, 'some provider', self._leap_provider_x509, 'some docker url')
        self.assertTrue(provider.initializing)

        # when
        t = Thread(target=provider.initialize)  # move to thread so that initializing behaviour is observable
        t.start()

        # then
        sleep(0.1)
        self.assertTrue(provider.initializing)
        t.join()
        self.assertFalse(provider.initializing)

    @patch('pixelated.provider.docker.docker.Client')
    def test_throws_initializing_exception_while_initializing(self, docker_mock):
        # given
        provider = DockerProvider(self._adapter, 'provider url', self._leap_provider_x509, 'some docker url')

        # when/then: every public operation must refuse to run before initialize() finished
        self.assertRaises(ProviderInitializingException, provider.start, 'test')
        self.assertRaises(ProviderInitializingException, provider.remove, 'test')
        self.assertRaises(ProviderInitializingException, provider.list_running)
        self.assertRaises(ProviderInitializingException, provider.stop, 'test')
        self.assertRaises(ProviderInitializingException, provider.status, 'test')
        self.assertRaises(ProviderInitializingException, provider.memory_usage)

    @patch('pixelated.provider.docker.docker.Client')
    def test_that_instance_can_be_started(self, docker_mock):
        expected_extra_hosts = {'nicknym.example.tld': '172.17.42.1',
                                'pixelated.example.tld': '172.17.42.1',
                                'api.example.tld': '172.17.42.1',
                                'example.tld': '172.17.42.1'}
        uid = os.getuid()
        self._adapter.docker_image_name.return_value = 'pixelated/pixelated-user-agent'
        client = docker_mock.return_value
        provider = self._create_initialized_provider(self._adapter, 'some docker url')
        prepare_pixelated_container = MagicMock()
        container = MagicMock()
        # first create_container call is the prepare step, second the agent itself
        client.create_container.side_effect = [prepare_pixelated_container, container]
        client.wait.return_value = 0
        self._leap_provider_x509.ca_bundle = 'some ca bundle'

        with patch('pixelated.provider.docker.socket.getfqdn') as mock:
            mock.return_value = 'pixelated.example.tld'
            provider.start(self._user_config('test'))

        client.create_container.assert_any_call(
            'pixelated/pixelated-user-agent',
            '/bin/bash -l -c "/usr/bin/pixelated-user-agent --leap-home /mnt/user --host 0.0.0.0 --port 4567 --organization-mode --leap-provider-cert /mnt/user/dispatcher-leap-provider-ca.crt"',
            mem_limit='300m',
            user=uid,
            name='test',
            volumes=['/mnt/user'],
            ports=[4567],
            environment={'DISPATCHER_LOGOUT_URL': '/auth/logout', 'FEEDBACK_URL': 'https://example.org/tickets'},
            stdin_open=True)
        client.create_container.assert_any_call(
            'pixelated/pixelated-user-agent',
            '/bin/true',
            name='pixelated_prepare',
            volumes=['/mnt/user'],
            environment={'DISPATCHER_LOGOUT_URL': '/auth/logout', 'FEEDBACK_URL': 'https://example.org/tickets'})

        data_path = join(self.root_path, 'test', 'data')
        client.start.assert_any_call(container,
                                     binds={data_path: {'bind': '/mnt/user', 'ro': False}},
                                     port_bindings={4567: ('127.0.0.1', 5000)},
                                     extra_hosts=expected_extra_hosts)
        client.start.assert_any_call(prepare_pixelated_container,
                                     binds={data_path: {'bind': '/mnt/user', 'ro': False}})

    @patch('pixelated.provider.docker.docker.Client')
    def test_that_existing_container_gets_reused(self, docker_mock):
        client = docker_mock.return_value
        client.containers.side_effect = [[], [{u'Status': u'Exited (-1) About an hour ago',
                                               u'Created': 1405332375,
                                               u'Image': u'pixelated:latest',
                                               u'Ports': [],
                                               u'Command': u'/bin/bash -l -c "/usr/bin/pixelated-user-agent --dispatcher"',
                                               u'Names': [u'/test'],
                                               u'Id': u'adfd4633fc42734665d7d98076b19b5f439648678b3b76db891f9d5072af50b6'}]]
        provider = self._create_initialized_provider(self._adapter, 'some docker url')
        container = MagicMock()
        client.create_container.return_value = container

        provider.start(self._user_config('test'))

        client.containers.assert_called_with(all=True)
        self.assertFalse(client.build.called)

    @patch('pixelated.provider.docker.docker.Client')
    def test_running_containers_empty_if_none_started(self, docker_mock):
        client = docker_mock.return_value
        client.containers.return_value = []
        provider = self._create_initialized_provider(self._adapter, 'some docker url')

        running = provider.list_running()

        self.assertEqual([], running)

    @patch('pixelated.provider.docker.docker.Client')
    def test_running_returns_running_container(self, docker_mock):
        client = docker_mock.return_value
        client.containers.side_effect = [[], [], [{u'Status': u'Up 20 seconds',
                                                   u'Created': 1404904929,
                                                   u'Image': u'pixelated:latest',
                                                   u'Ports': [],
                                                   u'Command': u'sleep 100',
                                                   u'Names': [u'/test'],
                                                   u'Id': u'f59ee32d2022b1ab17eef608d2cd617b7c086492164b8c411f1cbcf9bfef0d87'}]]
        client.wait.return_value = 0
        provider = self._create_initialized_provider(self._adapter, 'some docker url')
        provider.start(self._user_config('test'))

        running = provider.list_running()

        self.assertEqual(['test'], running)

    @patch('pixelated.provider.docker.docker.Client')
    def test_a_container_cannot_be_started_twice(self, docker_mock):
        client = docker_mock.return_value
        client.containers.side_effect = [[], [], [{u'Status': u'Up 20 seconds',
                                                   u'Created': 1404904929,
                                                   u'Image': u'pixelated:latest',
                                                   u'Ports': [],
                                                   u'Command': u'sleep 100',
                                                   u'Names': [u'/test'],
                                                   u'Id': u'f59ee32d2022b1ab17eef608d2cd617b7c086492164b8c411f1cbcf9bfef0d87'}]]
        client.wait.return_value = 0
        provider = self._create_initialized_provider(self._adapter, 'some docker url')
        user_config = self._user_config('test')
        provider.start(user_config)

        self.assertRaises(InstanceAlreadyRunningError, provider.start, user_config)

    @patch('pixelated.provider.docker.docker.Client')
    def test_stopping_not_running_container_raises_value_error(self, docker_mock):
        client = docker_mock.return_value
        client.containers.return_value = []
        provider = self._create_initialized_provider(self._adapter, 'some docker url')

        self.assertRaises(InstanceNotRunningError, provider.stop, 'test')

    @patch('pixelated.provider.docker.docker.Client')
    def test_stop_running_container(self, docker_mock):
        # given
        user_config = self._user_config('test')
        client = docker_mock.return_value
        container = {u'Status': u'Up 20 seconds',
                     u'Created': 1404904929,
                     u'Image': u'pixelated:latest',
                     u'Ports': [{u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 5000, u'PrivatePort': 4567}],
                     u'Command': u'sleep 100',
                     u'Names': [u'/test'],
                     u'Id': u'f59ee32d2022b1ab17eef608d2cd617b7c086492164b8c411f1cbcf9bfef0d87'}
        client.containers.side_effect = [[], [], [container], [container], [container]]
        client.wait.return_value = 0
        provider = self._create_initialized_provider(self._adapter, 'some docker url')
        provider.pass_credentials_to_agent(user_config, 'test')
        provider.start(user_config)

        # when
        provider.stop('test')

        # then: graceful stop, port freed, credentials dropped
        client.stop.assert_called_once_with(container, timeout=10)
        self.assertFalse(5000 in provider._used_ports())
        self.assertTrue('test' not in provider._credentials)

    @patch('pixelated.provider.docker.docker.Client')
    def test_stop_running_container_calls_kill_if_stop_times_out(self, docker_mock):
        # given
        client = docker_mock.return_value
        container = {u'Status': u'Up 20 seconds',
                     u'Created': 1404904929,
                     u'Image': u'pixelated:latest',
                     u'Ports': [{u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 5000, u'PrivatePort': 4567}],
                     u'Command': u'sleep 100',
                     u'Names': [u'/test'],
                     u'Id': u'f59ee32d2022b1ab17eef608d2cd617b7c086492164b8c411f1cbcf9bfef0d87'}
        client.containers.side_effect = [[], [], [container], [container], [container]]
        client.wait.return_value = 0
        client.stop.side_effect = requests.exceptions.Timeout
        provider = self._create_initialized_provider(self._adapter, 'some docker url')
        provider.start(self._user_config('test'))

        # when
        provider.stop('test')

        # then: falls back to SIGKILL after the graceful stop timed out
        client.stop.assert_called_once_with(container, timeout=10)
        client.kill.assert_called_once_with(container)

    @patch('pixelated.provider.docker.docker.Client')
    def test_status_stopped(self, docker_mock):
        provider = self._create_initialized_provider(self._adapter, 'some docker url')
        self.assertEqual({'state': 'stopped'}, provider.status('test'))

    @patch('pixelated.provider.docker.docker.Client')
    def test_status_running(self, docker_mock):
        client = docker_mock.return_value
        container = {u'Status': u'Up 20 seconds',
                     u'Created': 1404904929,
                     u'Image': u'pixelated:latest',
                     u'Ports': [{u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 5000, u'PrivatePort': 33144}],
                     u'Command': u'sleep 100',
                     u'Names': [u'/test'],
                     u'Id': u'f59ee32d2022b1ab17eef608d2cd617b7c086492164b8c411f1cbcf9bfef0d87'}
        client.containers.side_effect = [[], [], [container], [container]]
        client.wait.return_value = 0
        provider = self._create_initialized_provider(self._adapter, 'some docker url')
        provider.start(self._user_config('test'))

        self.assertEqual({'state': 'running', 'port': 5000}, provider.status('test'))

    @patch('pixelated.provider.docker.Process')
    @patch('pixelated.provider.docker.docker.Client')
    def test_memory_usage(self, docker_mock, process_mock):
        # given: one running container plus the docker inspect payload the provider
        # reads the host pid (State.Pid) from
        container = {u'Status': u'Up 20 seconds',
                     u'Created': 1404904929,
                     u'Image': u'pixelated:latest',
                     u'Ports': [{u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 5000, u'PrivatePort': 33144}],
                     u'Command': u'sleep 100',
                     u'Names': [u'/test'],
                     u'Id': u'f59ee32d2022b1ab17eef608d2cd617b7c086492164b8c411f1cbcf9bfef0d87'}
        info = {u'HostsPath': u'/var/lib/docker/containers/f2cdb04277e9e056c610240edffe8ff94ae272e462312c270e5300975d60af89/hosts',
                u'Created': u'2014-07-14T13:17:46.17558664Z',
                u'Image': u'f63df19194389be6481a174b36d291c483c8982d5c07485baa71a46b7f6582c8',
                u'Args': [],
                u'Driver': u'aufs',
                u'HostConfig': {u'PortBindings': {u'4567/tcp': [{u'HostPort': u'5000', u'HostIp': u'0.0.0.0'}]},
                                u'NetworkMode': u'',
                                u'Links': None,
                                u'LxcConf': None,
                                u'ContainerIDFile': u'',
                                u'Binds': [u'/tmp/multipile/folker:/mnt/user:rw'],
                                u'PublishAllPorts': False,
                                u'Dns': None,
                                u'DnsSearch': None,
                                u'Privileged': False,
                                u'VolumesFrom': None},
                u'MountLabel': u'',
                u'VolumesRW': {u'/mnt/user': True},
                u'State': {u'Pid': 3250,
                           u'Paused': False,
                           u'Running': True,
                           u'FinishedAt': u'0001-01-01T00:00:00Z',
                           u'StartedAt': u'2014-07-14T13:17:46.601922899Z',
                           u'ExitCode': 0},
                u'ExecDriver': u'native-0.2',
                u'ResolvConfPath': u'/etc/resolv.conf',
                u'Volumes': {u'/mnt/user': u'/tmp/multipile/folker'},
                u'Path': u'/bin/bash -l -c "/usr/bin/pixelated-user-agent --dispatcher"',
                u'HostnamePath': u'/var/lib/docker/containers/f2cdb04277e9e056c610240edffe8ff94ae272e462312c270e5300975d60af89/hostname',
                u'ProcessLabel': u'',
                u'Config': {u'MemorySwap': 0,
                            u'Hostname': u'f2cdb04277e9',
                            u'Entrypoint': None,
                            u'PortSpecs': None,
                            u'Memory': 0,
                            u'OnBuild': None,
                            u'OpenStdin': False,
                            u'Cpuset': u'',
                            u'Env': [u'HOME=/', u'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'],
                            u'User': u'',
                            u'CpuShares': 0,
                            u'AttachStdout': True,
                            u'NetworkDisabled': False,
                            u'WorkingDir': u'',
                            u'Cmd': [u'/bin/bash -l -c "/usr/bin/pixelated-user-agent --dispatcher"'],
                            u'StdinOnce': False,
                            u'AttachStdin': False,
                            u'Volumes': {u'/mnt/user': {}},
                            u'Tty': False,
                            u'AttachStderr': True,
                            u'Domainname': u'',
                            u'Image': u'pixelated',
                            u'ExposedPorts': {u'4567/tcp': {}}},
                u'Id': u'f2cdb04277e9e056c610240edffe8ff94ae272e462312c270e5300975d60af89',
                u'NetworkSettings': {u'Bridge': u'docker0',
                                     u'PortMapping': None,
                                     u'Gateway': u'172.17.42.1',
                                     u'IPPrefixLen': 16,
                                     u'IPAddress': u'172.17.0.14',
                                     u'Ports': {u'4567/tcp': [{u'HostPort': u'5000', u'HostIp': u'0.0.0.0'}]}},
                u'Name': u'/folker'}
        client = docker_mock.return_value
        client.containers.return_value = [container]
        client.inspect_container.return_value = info
        psutil_mock = process_mock.return_value
        psutil_mock.memory_info.return_value = pmem(1024, 2048)
        provider = self._create_initialized_provider(self._adapter, 'some docker url')

        # when
        usage = provider.memory_usage()

        # then
        self.assertEqual({'total_usage': 1024,
                          'average_usage': 1024,
                          'agents': [{'name': 'test', 'memory_usage': 1024}]}, usage)

    @patch('pixelated.provider.docker.docker.Client')
    def test_remove_error_if_not_exist(self, docker_mock):
        provider = self._create_initialized_provider(self._adapter, 'some docker url')
        self.assertRaises(ValueError, provider.remove, self._user_config('does_not_exist'))

    @patch('pixelated.provider.docker.docker.Client')
    def test_remove(self, docker_mock):
        # given
        user_config = self._user_config('test')
        os.makedirs(join(user_config.path, 'data'))
        client = docker_mock.return_value
        client.containers.return_value = []
        provider = self._create_initialized_provider(self._adapter, 'some docker url')

        # when
        provider.remove(user_config)

        # then: only the data directory is removed, the user directory stays
        self.assertTrue(exists(user_config.path))
        self.assertFalse(exists(join(user_config.path, 'data')))

    @patch('pixelated.provider.docker.docker.Client')
    def test_cannot_remove_while_running(self, docker_mock):
        # given
        client = docker_mock.return_value
        container = {u'Status': u'Up 20 seconds',
                     u'Created': 1404904929,
                     u'Image': u'pixelated:latest',
                     u'Ports': [{u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 5000, u'PrivatePort': 4567}],
                     u'Command': u'sleep 100',
                     u'Names': [u'/test'],
                     u'Id': u'f59ee32d2022b1ab17eef608d2cd617b7c086492164b8c411f1cbcf9bfef0d87'}
        client.containers.side_effect = [[], [], [container]]
        client.wait.return_value = 0
        provider = self._create_initialized_provider(self._adapter, 'some docker url')
        user_config = self._user_config('test')
        provider.start(user_config)

        # when/then
        self.assertRaises(ValueError, provider.remove, user_config)

    @patch('pixelated.provider.docker.docker.Client')
    def test_reset_data(self, docker_mock):
        # given
        user_config = self._user_config('test')
        os.makedirs(join(user_config.path, 'data'))
        client = docker_mock.return_value
        client.containers.return_value = []
        provider = self._create_initialized_provider(self._adapter, 'some docker url')

        # when
        provider.reset_data(user_config)

        # then
        self.assertTrue(exists(user_config.path))
        self.assertFalse(exists(join(user_config.path, 'data')))

    @patch('pixelated.provider.docker.docker.Client')
    def test_reset_data_does_not_complain_if_there_is_no_data(self, docker_mock):
        # given
        user_config = self._user_config('test')
        client = docker_mock.return_value
        client.containers.return_value = []
        provider = self._create_initialized_provider(self._adapter, 'some docker url')

        # when
        provider.reset_data(user_config)

        # then
        self.assertTrue(exists(user_config.path))
        self.assertFalse(exists(join(user_config.path, 'data')))

    @patch('pixelated.provider.docker.docker.Client')
    def test_reset_data_fails_if_user_does_not_exist(self, docker_mock):
        # given
        user_config = self._user_config('test')
        shutil.rmtree(user_config.path)
        client = docker_mock.return_value
        client.containers.return_value = []
        provider = self._create_initialized_provider(self._adapter, 'some docker url')

        # when/then
        self.assertRaises(ValueError, provider.reset_data, user_config)

    @patch('pixelated.provider.docker.docker.Client')
    def test_reset_data_fails_if_agent_is_running(self, docker_mock):
        # given
        client = docker_mock.return_value
        container = {u'Status': u'Up 20 seconds',
                     u'Created': 1404904929,
                     u'Image': u'pixelated:latest',
                     u'Ports': [{u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 5000, u'PrivatePort': 4567}],
                     u'Command': u'sleep 100',
                     u'Names': [u'/test'],
                     u'Id': u'f59ee32d2022b1ab17eef608d2cd617b7c086492164b8c411f1cbcf9bfef0d87'}
        client.containers.side_effect = [[], [], [container]]
        client.wait.return_value = 0
        provider = self._create_initialized_provider(self._adapter, 'some docker url')
        user_config = self._user_config('test')
        provider.start(user_config)

        # when/then
        self.assertRaises(InstanceAlreadyRunningError, provider.reset_data, user_config)

    @patch('pixelated.provider.docker.TempDir')
    @patch('pixelated.provider.docker.pkg_resources')
    @patch('pixelated.provider.docker.docker.Client')
    def test_use_build_script_instead_of_docker_file_if_available(self, docker_mock, res_mock, tempDir_mock):
        # given
        provider = DockerProvider(self._adapter, 'leap_provider', self._leap_provider_x509)
        tempBuildDir = TempDir()
        try:
            tempDir_mock.return_value = tempBuildDir
            tempBuildDir_name = tempBuildDir.name
            with NamedTemporaryFile() as file:
                res_mock.resource_exists.return_value = True
                # the stubbed build script records its working directory so we can
                # verify it ran inside the temporary build context
                res_mock.resource_string.return_value = '#!/bin/bash\necho %s $PWD > %s' % (file.name, file.name)

                # when
                provider.initialize()

                # then
                res_mock.resource_exists.assert_called_with('pixelated.resources', 'init-pixelated-docker-context.sh')
                res_mock.resource_string.assert_called_with('pixelated.resources', 'init-pixelated-docker-context.sh')
                with open(file.name, "r") as input:
                    data = input.read().replace('\n', '')
                    self.assertEqual('%s %s' % (file.name, os.path.realpath(tempBuildDir_name)), data)
                docker_mock.return_value.build.assert_called_once_with(path=tempBuildDir_name, tag='pixelated:latest', fileobj=None)
        finally:
            tempBuildDir.dissolve()

    @patch('pixelated.provider.docker.docker.Client')
    @patch('pixelated.provider.docker.CredentialsToDockerStdinWriter')
    def test_that_credentials_are_passed_to_agent_by_stdin(self, credentials_mock, docker_mock):
        # given
        user_config = self._user_config('test')
        provider = self._create_initialized_provider(self._adapter, 'some docker url')
        prepare_pixelated_container = MagicMock()
        container = MagicMock()
        client = docker_mock.return_value
        client.create_container.side_effect = [prepare_pixelated_container, container]
        client.wait.return_value = 0

        # when
        provider.pass_credentials_to_agent(user_config, 'password')
        provider.start(user_config)

        # then
        credentials_mock.return_value.start.assert_called_once_with()

    @patch('pixelated.provider.docker.docker.Client')
    def test_provider_checks_working_connection_to_docker(self, docker_mock):
        client = docker_mock.return_value
        client.info.side_effect = Exception
        self.assertRaises(Exception, DockerProvider, self._adapter, 'leap_provider', self._leap_provider_x509)

    @patch('pixelated.provider.docker.docker.Client')
    def test_that_provider_x509_ca_bundle_is_copied_to_agent(self, docker_mock):
        user_config = self._user_config('test')
        provider = self._create_initialized_provider(self._adapter, 'some docker url')
        client = docker_mock.return_value
        client.wait.return_value = 0
        with NamedTemporaryFile() as ca_file:
            with open(ca_file.name, 'w') as fd:
                fd.write('some certificate')
            self._leap_provider_x509.ca_bundle = ca_file.name

            provider.start(user_config)

            self.assertTrue(exists(join(self.root_path, 'test', 'data', 'dispatcher-leap-provider-ca.crt')))

    def _create_initialized_provider(self, adapter, docker_url=DockerProvider.DEFAULT_DOCKER_URL):
        """Build a DockerProvider and mark it initialized so tests skip the image setup."""
        provider = DockerProvider(adapter, 'leap_provider_hostname', self._leap_provider_x509, docker_url)
        provider._initializing = False
        return provider

    def _user_config(self, name):
        """Create the user's directory under root_path and return its UserConfig."""
        path = join(self.root_path, name)
        os.makedirs(path)
        return UserConfig(name, path)
class RRInstance(object):
    """Drives one `rr replay` session of a QEMU recording inside a tmux pane.

    Commands are typed into the pane via `tmux send-keys`; gdb's output is read
    back through a fifo tee'd from the pane. Checkpoints are used to move the
    replay to arbitrary guest instruction counts.
    """

    def __init__(self, description, rr_replay, source_pane):
        self.rr_replay = rr_replay
        self.description = description
        self.spawn_cmd = "{} replay {}".format(
            shlex.quote(cli_args.rr), shlex.quote(rr_replay))
        self.source_pane = source_pane
        self.breakpoints = {}        # break_arg -> gdb breakpoint number
        self.watches_set = 0         # hardware watchpoints currently enabled
        self.instr_to_checkpoint = sorteddict()  # instr count -> checkpoint id

    def __repr__(self):
        return "RRInstance({!r})".format(self.description)

    @cached_property
    def arch(self):
        """Detect the guest architecture from the qemu binary named in `rr ps`."""
        rr_ps = check_output([cli_args.rr, 'ps', self.rr_replay])
        qemu_regex = r"qemu-system-({})".format("|".join(SUPPORTED_ARCHS.keys()))
        re_result = re.search(qemu_regex, rr_ps)
        if not re_result:
            raise RuntimeError("Unsupported architecture!")
        return SUPPORTED_ARCHS[re_result.group(1)]

    # Runs in child process.
    def sendline(self, msg):
        """Type `msg` literally into the tmux pane, followed by ENTER."""
        check_call(['tmux', 'send-keys', '-t', self.pane, '-l', msg])
        check_call(['tmux', 'send-keys', '-t', self.pane, 'ENTER'])

    # Runs in child process.
    def kill(self):
        check_call(['tmux', 'kill-pane', '-t', self.pane])

    def __enter__(self):
        """Spawn `rr replay` in a new tmux pane and wait for the first gdb prompt."""
        self.tempdir_obj = TempDir()
        tempdir = self.tempdir_obj.__enter__()
        logfile = join(tempdir, self.description + "out")
        os.mkfifo(logfile)
        # tee the pane's output both into the fifo we read and into a log file
        bash_command = "{} 2>&1 | tee -i --output-error=warn {} | tee -i --output-error=warn {}_log.txt".format(
            self.spawn_cmd, shlex.quote(logfile), self.description)
        self.pane = check_output([
            'tmux', 'split-window', '-hdP',
            '-F', '#{pane_id}', '-t', pane,
            'bash', '-c', bash_command]).strip()
        self.proc = Expect(os.open(logfile, os.O_RDONLY | os.O_NONBLOCK), quiet=True)
        try:
            self.proc.expect("(rr) ", timeout=3)
        except TimeoutExpired:
            print(self.proc.sofar)
            raise
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # keep the pane around on clean exit for inspection; kill it on error
        if exc_type:
            self.kill()
        self.tempdir_obj.__exit__(exc_type, exc_value, traceback)

    def gdb(self, *args, **kwargs):
        """Send a gdb command and return its output with prompt/echo stripped.

        kwargs: timeout (seconds or None), expect_prompt (default "(rr) ").
        """
        timeout = kwargs.get('timeout', None)
        cmd = " ".join(map(str, args))
        print("(rr-{}) {}".format(self.description, cmd))
        sys.stdout.flush()

        expect_prompt = kwargs.get("expect_prompt", "(rr) ")

        # Drain any stale pane output so expect() only matches fresh output.
        while True:
            try:
                os.read(self.proc.fd, 1024)
            except OSError as e:
                if e.errno in [EAGAIN, EWOULDBLOCK]:
                    break
                else:
                    raise

        self.sendline(cmd)

        try:
            output = self.proc.expect(expect_prompt, timeout=timeout)
        except TimeoutExpired:
            print(self.proc.sofar)
            print("EXCEPTION!")
            sys.stdout.flush()
            # BUG FIX: the original fell through here with `output` unbound,
            # masking the timeout with a NameError. Propagate the timeout instead.
            raise

        if output.endswith(expect_prompt):
            output = output[:-len(expect_prompt)]
        if output.startswith(cmd):
            output = output[len(cmd):]
        return output.strip()

    def quit(self):
        self.gdb("set confirm off")
        self.sendline("quit")

    def gdb_int_re(self, result_re, *args):
        """Run a gdb command and extract an int via regex `result_re`."""
        result = self.gdb(*args)
        return re_search_int(result_re, result)

    def breakpoint(self, break_arg):
        bp_num = self.gdb_int_re(r"Breakpoint ([0-9]+) at", "break", break_arg)
        self.breakpoints[break_arg] = bp_num

    def disable_all(self):
        self.gdb("disable")
        self.watches_set = 0

    def enable(self, break_arg):
        self.gdb("enable", self.breakpoints[break_arg])

    def enable_only(self, *breaks):
        self.disable_all()
        for break_arg in breaks:
            self.enable(break_arg)

    def condition(self, break_arg, cond):
        self.gdb("condition", self.breakpoints[break_arg], cond)

    def display(self, cmd):
        self.gdb("display", cmd)

    def checkpoint(self):
        return self.gdb_int_re(r"Checkpoint ([0-9]+) at", "checkpoint")

    def restart(self, checkpoint):
        self.disable_all()
        self.gdb("restart", checkpoint)

    def restart_instr(self, instr):
        self.restart(self.instr_to_checkpoint[instr])

    def get_value(self, expr):
        return self.gdb_int_re(r"\$[0-9]+ = ([0-9]+)", "print/u", expr)

    def instr_count(self):
        return self.get_value("cpus->tqh_first->rr_guest_instr_count")

    @cached_property
    def instr_count_ptr(self):
        return self.get_value("&cpus->tqh_first->rr_guest_instr_count")

    def condition_instr(self, break_arg, op, instr):
        """Condition a breakpoint on the guest instruction counter, e.g. (">=", n)."""
        self.condition(
            break_arg, "*(uint64_t *){} {} {}".format(self.instr_count_ptr, op, instr))

    def set_breakpoint_commands(self, break_num):
        self.gdb("commands", break_num, expect_prompt=">")
        # self.gdb("p/u cpus->tqh_first->rr_guest_instr_count", expect_prompt=">")
        self.gdb("call target_disas(stdout, cpu, tb->pc, tb->size, 0)", expect_prompt=">")
        self.gdb("end")

    def display_commands(self):
        self.display("cpus->tqh_first->rr_guest_instr_count")
        self.display("cpus->tqh_first->exception_index")
        self.display("cpus->tqh_first->exit_request")
        self.gdb("set $env = ((CPUPPCState*) cpus->tqh_first->env_ptr)")
        self.display("$env->pending_interrupts")

    @cached_property
    def ram_ptr(self):
        return self.get_value(
            "memory_region_find(" +
            "get_system_memory(), 0x2000000, 1).mr->ram_block.host")

    def crc32_ram(self, low, size):
        """XOR of crc32s over guest RAM [low, low+size), in at most 2GB steps."""
        step = 1 << 31 if size > (1 << 31) else size
        crc32s = 0
        for start in range(low, low + size, step):
            crc32s ^= self.get_value("crc32(0, {} + {}, {})".format(
                hex(self.ram_ptr), hex(start), hex(step)))
        return crc32s

    @cached_property
    def ram_size(self):
        return self.get_value('ram_size')

    @cached_property
    def reg_size(self):
        return self.get_value("sizeof (({}*)0)->{}[0]".format(
            self.arch.cpu_state_name, self.arch.reg_name))

    @cached_property
    def num_regs(self):
        return self.get_value("sizeof (({}*)0)->{}".format(
            self.arch.cpu_state_name, self.arch.reg_name)) // self.reg_size

    def env_value(self, name):
        return self.get_value("(({}*)cpus->tqh_first->env_ptr)->{}".format(
            self.arch.cpu_state_name, name))

    def env_ptr(self, name):
        return self.get_value("&(({}*)cpus->tqh_first->env_ptr)->{}".format(
            self.arch.cpu_state_name, name))

    def checksum(self):
        # NB: Only run when you are at a breakpoint in CPU thread!
        memory = self.crc32_ram(0, self.ram_size)
        regs = self.get_value("rr_checksum_regs()")
        return (memory, regs)

    def when(self):
        return self.gdb_int_re(r"Current event: ([0-9]+)", "when")

    def cont(self):
        return self.gdb("continue", timeout=None)

    def reverse_cont(self):
        return self.gdb("reverse-continue", timeout=None)

    # x86 debug registers can only watch 4 locations of 8 bytes.
    # we need to make sure to enforce that.
    # returns true if can set more watchpoints. false if we're full up.
    def watch_addr(self, addr, size):
        assert size in [1, 2, 4, 8]
        bits = size * 8
        num = self.gdb_int_re(r"Hardware watchpoint ([0-9]+):", "watch",
                              "*(uint{}_t *)0x{:x}".format(bits, addr))
        self.watches_set += 1
        if self.watches_set > 4:
            print()
            print("WARNING: Too much divergence! Not watching some diverged points.")
            print("(watchpoints are full...)")
            print()
        return num

    def watch(self, watchpoint):
        return self.watch_addr(*watchpoint.render(self))

    def record_instr_checkpoint(self):
        """Memoize a checkpoint for the current instruction count and return it."""
        instr_count = self.instr_count()
        if instr_count not in self.instr_to_checkpoint:
            self.instr_to_checkpoint[instr_count] = self.checkpoint()
        return self.instr_to_checkpoint[instr_count]

    # Get as close to instr as possible.
    def goto_rough(self, target_instr):
        print("Moving", self, "to instr", target_instr)
        current_instr = self.instr_count()
        if target_instr in self.instr_to_checkpoint:
            run_instr = target_instr
        else:
            # nearest recorded checkpoint at or below the target
            index = self.instr_to_checkpoint.keys().bisect_left(target_instr) - 1
            run_instr = self.instr_to_checkpoint.keys()[index]

        if current_instr > target_instr or current_instr < run_instr:
            self.restart(self.instr_to_checkpoint[run_instr])

        # We should have now guaranteed that both will be in [run_instr, target_instr].
        # Now run them forwards to as close to target_instr as we can get.
        # debug_counter fires every 128k instrs, so move to last debug_counter
        # before desired instr count.
        run_instr = target_instr - DEBUG_COUNTER_PERIOD
        current_instr = self.instr_count()
        if current_instr < run_instr:
            print("Moving from {} to {} below {}".format(
                current_instr, run_instr, target_instr))
            self.enable_only("debug_counter")
            self.condition_instr("debug_counter", ">=", run_instr)
            self.cont()

        current_instr = self.instr_count()
        # unfortunately, we might have gone too far above. move back one
        # debug_counter fire if necessary.
        if current_instr > target_instr:
            print("Moving back to {}".format(target_instr))
            self.enable_only("debug_counter")
            self.condition_instr("debug_counter", "<=", target_instr)
            self.reverse_cont()

        current_instr = self.instr_count()
        if current_instr != target_instr:
            print("Moving precisely to", target_instr)
            self.enable_only("cpu_loop_exec_tb")
            self.condition_instr("cpu_loop_exec_tb", ">=", target_instr)
            self.cont()

    # Go from beginning of program to execution of first TB after record/replay.
    # Return number of that checkpoint.
    def goto_first_tb(self):
        self.enable_only("rr_do_begin_record", "rr_do_begin_replay")
        self.cont()
        self.enable_only("cpu_loop_exec_tb")
        self.cont()
        return self.record_instr_checkpoint()

    def find_last_instr(self, cli_args, last_event):
        """Return the maximum instruction count to examine; leaves the replay
        restarted at the first-TB checkpoint when it had to run to the end."""
        first_tb_checkpoint = self.goto_first_tb()

        if cli_args.instr_max is not None:
            instr_count_max = cli_args.instr_max
        else:
            # get last instruction in failed replay
            self.gdb("run", last_event, timeout=None)
            self.enable_only("cpu_loop_exec_tb")
            self.reverse_cont()  # go backwards through failure signal
            self.reverse_cont()  # land on last TB exec
            instr_count_max = self.instr_count()

            # reset replay so it is in same state as record
            self.restart(first_tb_checkpoint)

        return instr_count_max
def _initialize(self):
    """Set up fresh per-instance state: a scratch dir and an empty account map."""
    self.accounts = {}
    self._tmp_dir = TempDir()
class ForkProviderTest(unittest.TestCase):
    """Tests for ForkProvider: start/stop lifecycle, status reporting and
    memory accounting of per-user agent processes. The process runner
    (Adapter) is always a MagicMock, so no real processes are forked."""

    def setUp(self):
        self._tmpdir = TempDir()
        self.root_path = self._tmpdir.name
        self.runner = MagicMock(spec=Adapter)
        self.provider = ForkProvider(self.runner)
        self.provider.initialize()

    def tearDown(self):
        self._tmpdir.dissolve()

    def test_instances_can_be_removed(self):
        user_config = UserConfig('test', join(self.root_path, 'test'))
        os.makedirs(join(user_config.path, 'data'))
        self.provider.remove(user_config)

    def test_that_instance_can_be_started_and_gets_initialized(self):
        self.provider.start(self._user_config('test'))
        self.runner.initialize.assert_called_with('test')
        self.runner.start.assert_called_with('test')

    def test_that_instance_cannot_be_started_twice(self):
        user_config = self._user_config('test')
        self.provider.start(user_config)
        self.assertRaises(InstanceAlreadyRunningError, self.provider.start, user_config)

    # NOTE(review): "runnig" is a typo in the method name; kept unchanged so
    # any test selection by name keeps working.
    def test_that_running_instances_are_in_runnig_list(self):
        self._init_runner_memory_usage()
        for name in ['one', 'two', 'three']:
            self.provider.start(self._user_config(name))
        self.assertEqual({'one', 'two', 'three'}, set(self.provider.list_running()))

    def test_that_non_existing_instance_cannot_be_stopped(self):
        self.assertRaises(InstanceNotRunningError, self.provider.stop, 'test')

    def test_that_non_started_instance_cannot_be_stopped(self):
        self.assertRaises(InstanceNotRunningError, self.provider.stop, UserConfig('not-started', None))

    def test_that_running_instance_can_be_stopped(self):
        process = MagicMock(spec=ForkedProcess)
        self.runner.start.return_value = process
        user_config = self._user_config('test')
        self.provider.start(user_config)
        self.provider.stop(user_config.username)
        process.terminate.assert_called_once_with()

    def test_that_instance_cannot_be_stopped_twice(self):
        self.provider.start(self._user_config('test'))
        self.provider.stop('test')
        self.assertRaises(InstanceNotRunningError, self.provider.stop, 'test')

    def test_that_status_returns_current_port(self):
        process = MagicMock(spec=ForkedProcess(None, 1234))
        process.port = 1234
        self.runner.start.return_value = process
        self.provider.start(self._user_config('test'))
        status = self.provider.status('test')
        self.assertEqual({'port': 1234, 'state': 'running'}, status)

    def assert_config_file(self, filename, name, hashed_password, salt):
        # Helper: assert the on-disk agent config has the exact expected layout.
        with open(filename, 'r') as file:
            content = file.read()
            self.assertEqual('[agent]\nname = %s\nhashed_password = %s\nsalt = %s\n\n' % (name, hashed_password, salt), content)

    def test_memory_usage_zero_if_no_processes(self):
        self.assertEqual({'total_usage': 0, 'average_usage': 0, 'agents': []}, self.provider.memory_usage())

    def test_memory_usage_with_process(self):
        # given
        process = MagicMock(spec=ForkedProcess(None, 1234))
        process.port = 1234
        process.memory_usage.return_value = 1024
        self.runner.start.return_value = process
        self.provider.start(self._user_config('test'))
        # when
        usage = self.provider.memory_usage()
        # then
        self.assertEqual({'total_usage': 1024, 'average_usage': 1024, 'agents': [
            {'name': 'test', 'memory_usage': 1024}]}, usage)

    @patch('pixelated.provider.fork.psutil.virtual_memory')
    def test_that_instance_cannot_be_started_with_too_little_memory_left(self, vm_mock):
        # given
        svmem = namedtuple('svmem', ['free'])
        free_memory = 1024 * 1024
        vm_mock.return_value = svmem(free_memory)
        process = MagicMock(spec=ForkedProcess)
        # first agent claims more than the reported free memory
        process.memory_usage.return_value = free_memory + 1
        self.runner.start.return_value = process
        self.provider.start(self._user_config('memory monster'))
        second_config = self._user_config('second')
        # when/then
        self.assertRaises(NotEnoughFreeMemory, self.provider.start, second_config)
        process.memory_usage.return_value = free_memory - 1
        self.provider.start(second_config)

    def _init_runner_memory_usage(self):
        # Every started agent process reports a fixed 1 KiB footprint.
        def new_process(*args):
            process = MagicMock(spec=ForkedProcess)
            process.memory_usage.return_value = 1024
            return process
        self.runner.start.side_effect = new_process

    def _user_config(self, name):
        # Create the user's data dir on disk and return its UserConfig.
        path = join(self.root_path, name)
        os.makedirs(path)
        return UserConfig(name, path)
class RESTfulServerTest(unittest.TestCase):
    """Integration-style tests driving RESTfulServer over HTTPS (Python 2).

    One server instance is started once for the whole class; per-test setUp
    only resets the shared mocks and builds a TLSv1-enforcing requests
    session.

    NOTE(review): two test bodies below are truncated where credentials were
    redacted from the source ('https://*****:*****...'); the surviving
    tokens are preserved as found and will not parse until restored.
    """

    mock_provider = None
    ssl_config = None
    server = None

    @classmethod
    def setUpClass(cls):
        RESTfulServerTest.mock_provider = MagicMock(spec=Provider)
        RESTfulServerTest.mock_users = MagicMock(spec=Users)
        RESTfulServerTest.mock_authenticator = MagicMock(spec=Authenticator)
        RESTfulServerTest.ssl_config = SSLConfig(certfile(), keyfile())
        RESTfulServerTest.server = RESTfulServer(RESTfulServerTest.ssl_config, RESTfulServerTest.mock_users, RESTfulServerTest.mock_authenticator, RESTfulServerTest.mock_provider)
        RESTfulServerTest.server.server_forever_in_backgroud()
        time.sleep(1)  # let it get up to speed

    @classmethod
    def tearDownClass(cls):
        RESTfulServerTest.server.shutdown()
        print 'Stopped test server'

    def setUp(self):
        self.mock_provider = RESTfulServerTest.mock_provider
        self.mock_provider.reset_mock()
        self.mock_users.reset_mock()
        self.mock_authenticator.reset_mock()
        self.ssl_request = requests.Session()
        self.ssl_request.mount('https://', EnforceTLSv1Adapter())
        self._tmpdir = TempDir()
        self._root_path = self._tmpdir.name

    def tearDown(self):
        self._tmpdir.dissolve()

    # Thin HTTPS helpers verifying against the test CA.
    def get(self, url):
        return self.ssl_request.get(url, verify=cafile())

    def put(self, url, data=None):
        if data:
            data = json.dumps(data)
        return self.ssl_request.put(url, data=data, headers={'content-type': 'application/json'}, verify=cafile())

    def post(self, url, data=None):
        if data:
            data = json.dumps(data)
        return self.ssl_request.post(url, data=data, headers={'content-type': 'application/json'}, verify=cafile())

    def delete(self, url):
        return self.ssl_request.delete(url, verify=cafile())

    def assertSuccessJson(self, dict, response):
        # NOTE(review): parameter name shadows the builtin `dict`.
        self.assertEqual(200, response.status_code)
        self.assertEquals('application/json', response.headers['content-type'])
        self.assertEqual(dict, response.json())

    def assertInternalError(self, response):
        self.assertEqual(500, response.status_code)

    def test_list_empty_agents(self):
        # given
        self.mock_provider.status.return_value = {'state': 'stopped'}
        self.mock_users.list.return_value = []
        # when
        # NOTE(review): remainder of this test truncated by redaction.
        r = self.get('https://*****:*****@patch('pixelated.manager.SSLWSGIRefServerAdapter')
    @patch('pixelated.manager.run')  # mock run call to avoid actually starting the server
    def test_that_ssl_server_adapter_gets_used_when_ssl_config_is_provided(self, run_mock, ssl_adapter_mock):
        server = RESTfulServer(RESTfulServerTest.ssl_config, RESTfulServerTest.mock_users, RESTfulServerTest.mock_authenticator, RESTfulServerTest.mock_provider)
        # when
        server.serve_forever()
        expected_ca_certs = None  # which means system ciphers
        expected_ciphers = DEFAULT_CIPHERS
        expected_ssl_version = latest_available_ssl_version()
        expected_host = '127.0.0.1'
        expected_port = 4443
        expected_certfile = certfile()
        expected_keyfile = keyfile()
        ssl_adapter_mock.assert_called_once_with(ssl_ca_certs=expected_ca_certs, ssl_ciphers=expected_ciphers, ssl_version=expected_ssl_version, host=expected_host, port=expected_port, ssl_cert_file=expected_certfile, ssl_key_file=expected_keyfile)

    @patch('pixelated.manager.WSGIRefServer')
    @patch('pixelated.manager.run')  # mock run call to avoid actually starting the server
    def test_that_serve_forever_runs_without_ssl_context(self, run_mock, wsgiRefServer_mock):
        # given
        server = RESTfulServer(None, RESTfulServerTest.mock_users, RESTfulServerTest.mock_authenticator, RESTfulServerTest.mock_provider)
        # when
        server.serve_forever()
        # then
        wsgiRefServer_mock.assert_called_once_with(host='localhost', port=4443)

    def test_handles_provider_initializing(self):
        self.mock_users.list.return_value = ['test']
        self.mock_provider.status.side_effect = ProviderInitializingException
        # NOTE(review): remainder of this test truncated by redaction.
        r = self.get('https://*****:*****@patch('pixelated.manager.DockerProvider')
    @patch('pixelated.manager.RESTfulServer')
    @patch('pixelated.manager.Thread')
    @patch('pixelated.manager.Users')
    @patch('pixelated.manager.LeapProvider')
    @patch('pixelated.manager.LeapCertificate')
    def test_that_initialize_happens_in_background_thread(self, leap_certificate_mock, leap_provider_mock, users_mock, thread_mock, server_mock, docker_provider_mock):
        # given
        docker_provider_mock.return_value = self.mock_provider
        manager = DispatcherManager(self._root_path, None, None, None, None, provider='docker')
        # when
        manager.serve_forever()
        # then: initialize is handed to a Thread, never called inline
        thread_mock.assert_called_with(target=self.mock_provider.initialize)
        self.assertFalse(self.mock_provider.initialize.called)

    @patch('pixelated.manager.LeapCertificate')
    @patch('pixelated.manager.Authenticator')
    @patch('pixelated.manager.DockerProvider')
    @patch('pixelated.manager.RESTfulServer')
    @patch('pixelated.manager.Thread')
    @patch('pixelated.manager.Users')
    @patch('pixelated.manager.LeapProvider')
    def test_that_tls_config_gets_passed_to_authenticator(self, leap_provider_mock, users_mock, thread_mock, server_mock, docker_provider_mock, authenticator_mock, leap_certificate_mock):
        # given
        manager = DispatcherManager(self._root_path, None, None, None, 'some ca bundle', leap_provider_fingerprint='some fingerprint', provider='docker')
        # when
        manager.serve_forever()
        # then
        authenticator_mock.assert_called_once_with(users_mock.return_value, leap_provider_mock.return_value)

    @patch('pixelated.manager.LeapCertificate')
    @patch('pixelated.manager.Authenticator')
    @patch('pixelated.manager.DockerProvider')
    @patch('pixelated.manager.RESTfulServer')
    @patch('pixelated.manager.Thread')
    @patch('pixelated.manager.Users')
    @patch('pixelated.manager.LeapProvider')
    def test_that_leap_certificate_gets_downloaded_on_serve_forever(self, leap_provider_mock, users_mock, thread_mock, server_mock, docker_provider_mock, authenticator_mock, leap_certificate_mock):
        # given
        cert_file = join(self._root_path, 'ca.crt')  # NOTE(review): unused local
        manager = DispatcherManager(self._root_path, None, None, None, 'some ca bundle', leap_provider_fingerprint='some fingerprint', provider='docker')
        # when
        manager.serve_forever()
        # then
        leap_certificate_mock.return_value.refresh_api_ca_bundle.assert_called_once_with()
def setUp(self):
    """Prepare a scratch dir and an HTTPS session that enforces TLSv1."""
    self._tmpdir = TempDir()
    session = requests.Session()
    session.mount('https://', EnforceTLSv1Adapter())
    self.ssl_request = session
class UsersTest(unittest.TestCase):
    """Tests for the Users registry: creation, name validation and config
    persistence rooted at a temporary directory."""

    def setUp(self):
        self._tmpdir = TempDir()
        self.root_path = self._tmpdir.name
        self.users = Users(self.root_path)

    def test_that_constructor_throws_exception_if_root_path_does_not_exist(self):
        self.assertRaises(ValueError, Users, '/does/not/exist')

    def tearDown(self):
        self._tmpdir.dissolve()

    def test_add_user(self):
        self.users.add('name')

    def test_user_cannot_be_added_twice(self):
        self.users.add('name')
        self.assertRaises(UserAlreadyExistsError, self.users.add, 'name')

    def test_list_empty(self):
        self.assertEqual([], self.users.list())

    def test_list_single_user(self):
        self.users.add('name')
        self.assertEqual(['name'], self.users.list())

    def test_get_user_config_throws_exception_if_user_not_exists(self):
        self.assertRaises(UserNotExistError, self.users.config, 'name')

    def test_add_user_creates_data_folder_and_config(self):
        self.users.add('name')
        data_path = join(self.root_path, 'name')
        config_file = join(data_path, 'user.cfg')
        self.assertTrue(exists(data_path))
        self.assertTrue(exists(config_file))

    def test_add_user_does_not_override_existing_config(self):
        # pre-existing user.cfg content must survive a subsequent add()
        data_path = join(self.root_path, 'name')
        config_file = join(data_path, 'user.cfg')
        mkdir(data_path)
        with open(config_file, 'w') as fd:
            fd.write('[section]\none = first value\ntwo = second value\n\n')
        self.users.add('name')
        data_path = join(self.root_path, 'name')
        config_file = join(data_path, 'user.cfg')
        with open(config_file, 'r') as fd:
            self.assertEqual('[section]\none = first value\ntwo = second value\n\n', fd.read())

    def test_add_validates_username(self):
        # valid names pass; capitals, slashes, spaces and symbols are rejected
        self.users.add('a_name.with-valid-chars')
        with self.assertRaises(ValueError):
            self.users.add('CapitalLetters')
        with self.assertRaises(ValueError):
            self.users.add('some/name/with/slashes')
        with self.assertRaises(ValueError):
            self.users.add('name with spaces')
        with self.assertRaises(ValueError):
            self.users.add('name=with%&')

    def test_loads_all_existing_users_on_startup(self):
        user1_data_path = join(self.root_path, 'user1')
        mkdir(user1_data_path)
        user1_config_path = join(self.root_path, 'user1', 'user.cfg')
        with open(user1_config_path, 'w') as fd:
            fd.write('[section]\none = first value\ntwo = second value\n\n')
        users = Users(self.root_path)
        self.assertEqual(['user1'], users.list())
        self.assertEqual('first value', users.config('user1')['section.one'])

    def test_update_user_config(self):
        self.users.add('name')
        config = self.users.config('name')
        config['foo.bar'] = 'some value'
        self.users.update_config(config)
        config = self.users.config('name')
        self.assertEqual('some value', config['foo.bar'])

    def test_has_user(self):
        self.users.add('name')
        self.assertTrue(self.users.has_user('name'))
        self.assertFalse(self.users.has_user('other'))
class TestCli(unittest.TestCase):
    """End-to-end tests for the cli entry point (init/add/find/update/delete).

    NOTE(review): Python 2 only — relies on str.decode('hex') and `print`
    statements. Several fixture literals ('******') are redaction-masked in
    the source and preserved byte-for-byte.
    """

    # fixed key/iv fixtures, hex-decoded to raw bytes
    key = '0b660492d98c54412d3d91818de5a2ae0b3110850a12010768b80fb277f55aa6'.decode('hex')
    iv = '9059d464b93397a2a98e8e1f00b596c6'.decode('hex')
    length = 5
    characters = 'abc'
    domain = 'domain'
    username = '******'
    password = '******'
    pin = '1234'

    def setUp(self):
        # fresh keyfile + database per test, created through the real CLI
        self.d = TempDir()
        self.k = os.path.join(self.d.name, 'keyfile')
        d = os.path.join(self.d.name, 'database')
        cli.main(('init %s %s' % (self.k, d)).split(' '))
        self.keyfile = Keyfile.load(self.k)

    def tearDown(self):
        self.d.dissolve()

    @not_raises(AttributeError)
    def test_init_defaults(self):
        with TempDir() as d:
            k = os.path.join(d.name, 'keyfile')
            d = os.path.join(d.name, 'database')
            cli.main(('init %s %s' % (k, d)).split(' '))
            keyfile = Keyfile.load(k)
            self.assertEqual(k, keyfile.path)
            self.assertEqual(d, keyfile.database_path)
            self.assertEqual(Keyfile.LENGTH, keyfile.length)
            self.assertEqual(Keyfile.CHARACTERS, keyfile.characters)
            # bare attribute access: must not raise AttributeError
            keyfile.key
            keyfile.iv

    def test_init_override(self):
        with TempDir() as d:
            k = os.path.join(d.name, 'keyfile')
            d = os.path.join(d.name, 'database')
            cli.main(('init %s %s --key %s --iv %s --length %d --characters %s' % (k, d, self.key, self.iv, self.length, self.characters)).split(' '))
            keyfile = Keyfile.load(k)
            self.assertEqual(k, keyfile.path)
            self.assertEqual(d, keyfile.database_path)
            self.assertEqual(self.length, keyfile.length)
            self.assertEqual(self.characters, keyfile.characters)
            self.assertEqual(self.key, keyfile.key)
            self.assertEqual(self.iv, keyfile.iv)

    def test_add_defaults(self):
        results = cli.main(
            ('add %s --domain %s --username %s --pin %s' % (self.k, self.domain, self.username, self.pin)).split(' '))
        with Database(self.keyfile) as d:
            c = d.find(domain=self.domain, username=self.username)[0]
            c.unlock(self.pin)
            self.assertEqual(results, '\n'.join([c.domain, c.username, c.plainpassword]))

    def test_add_override(self):
        results = cli.main(
            ('add %s --domain %s --username %s --pin %s --length %d --characters %s' % (self.k, self.domain, self.username, self.pin, self.length, self.characters)).split(' '))
        # generated password honours --length and --characters
        parts = results.split('\n')
        self.assertEqual(self.length, len(parts[2]))
        for c in parts[2]:
            self.assertIn(c, self.characters)

    def test_add_provide_password(self):
        results = cli.main(
            ('add %s --domain %s --username %s --pin %s --password %s' % (self.k, self.domain, self.username, self.pin, self.password)).split(' '))
        parts = results.split('\n')
        self.assertEqual(self.password, parts[2])

    def test_find_single_keyfile_single_result(self):
        cli.main(
            ('add %s --domain %s --username %s --pin %s --password %s' % (self.k, self.domain, self.username, self.pin, self.password)).split(' '))
        results = cli.main(
            ('find %s --domain %s --username %s --pin %s' % (self.k, self.domain, self.username, self.pin)).split(' '))
        self.assertEqual(results, '\n'.join([self.domain, self.username, self.password]))

    def test_find_single_keyfile_multiple_results(self):
        cli.main(
            ('add %s --domain %s --username %s --pin %s --password %s' % (self.k, self.domain, self.username, self.pin, self.password)).split(' '))
        cli.main(
            ('add %s --domain %s --username %s --pin %s --password %s' % (self.k, self.domain + '2', self.username, self.pin, self.password)).split(' '))
        results = cli.main(
            ('find %s --domain %s --username %s --pin %s' % (self.k, self.domain, self.username, self.pin)).split(' '))
        self.assertEqual(results, '\n'.join([self.domain, self.username, '', self.domain + '2', self.username]))

    def test_find_multiple_keyfiles_single_result(self):
        k2 = os.path.join(self.d.name, 'keyfile2')
        d2 = os.path.join(self.d.name, 'database2')
        cli.main(('init %s %s' % (k2, d2)).split(' '))
        cli.main(
            ('add %s --domain %s --username %s --pin %s --password %s' % (self.k, self.domain, self.username, self.pin, self.password)).split(' '))
        results = cli.main(
            ('find %s %s --domain %s --username %s --pin %s' % (self.k, k2, self.domain, self.username, self.pin)).split(' '))
        self.assertEqual(results, '\n'.join([self.domain, self.username, self.password]))

    def test_find_multiple_keyfiles_multiple_results(self):
        k2 = os.path.join(self.d.name, 'keyfile2')
        d2 = os.path.join(self.d.name, 'database2')
        cli.main(('init %s %s' % (k2, d2)).split(' '))
        cli.main(
            ('add %s --domain %s --username %s --pin %s --password %s' % (self.k, self.domain, self.username, self.pin, self.password)).split(' '))
        cli.main(
            ('add %s --domain %s --username %s --pin %s --password %s' % (k2, self.domain + '2', self.username, self.pin, self.password)).split(' '))
        results = cli.main(
            ('find %s %s --domain %s --username %s --pin %s' % (self.k, k2, self.domain, self.username, self.pin)).split(' '))
        self.assertEqual(results, '\n'.join([self.domain, self.username, '', self.domain + '2', self.username]))

    def test_find_single_result_no_pin(self):
        # without --pin the password is shown masked
        cli.main(
            ('add %s --domain %s --username %s --pin %s --password %s' % (self.k, self.domain, self.username, self.pin, self.password)).split(' '))
        results = cli.main(
            ('find %s --domain %s --username %s' % (self.k, self.domain, self.username)).split(' '))
        self.assertEqual(results, '\n'.join([self.domain, self.username, '********']))

    def test_find_fuzzy_match(self):
        cli.main(
            ('add %s --domain %s --username %s --pin %s --password %s' % (self.k, self.domain, self.username, self.pin, self.password)).split(' '))
        results = cli.main(
            ('find %s --domain %s --username %s --pin %s' % (self.k, self.domain[0:2], self.username, self.pin)).split(' '))
        self.assertEqual(results, '\n'.join([self.domain, self.username, self.password]))

    def test_find_only_domain(self):
        cli.main(
            ('add %s --domain %s --username %s --pin %s --password %s' % (self.k, self.domain, self.username, self.pin, self.password)).split(' '))
        results = cli.main(
            ('find %s --domain %s --pin %s' % (self.k, self.domain, self.pin)).split(' '))
        self.assertEqual(results, '\n'.join([self.domain, self.username, self.password]))

    def test_find_only_username(self):
        cli.main(
            ('add %s --domain %s --username %s --pin %s --password %s' % (self.k, self.domain, self.username, self.pin, self.password)).split(' '))
        results = cli.main(
            ('find %s --username %s --pin %s' % (self.k, self.username, self.pin)).split(' '))
        self.assertEqual(results, '\n'.join([self.domain, self.username, self.password]))

    def test_update_defaults(self):
        results1 = cli.main(
            ('add %s --domain %s --username %s --pin %s' % (self.k, self.domain, self.username, self.pin)).split(' '))
        results2 = cli.main(
            ('update %s --domain %s --username %s --pin %s' % (self.k, self.domain, self.username, self.pin)).split(' '))
        with Database(self.keyfile) as d:
            c = d.find(domain=self.domain, username=self.username)[0]
            c.unlock(self.pin)
            print c
            self.assertEqual(results2, '\n'.join([c.domain, c.username, c.plainpassword]))
        # update regenerates the password, so output must differ
        self.assertNotEqual(results1, results2)

    def test_update_override(self):
        results1 = cli.main(
            ('add %s --domain %s --username %s --pin %s --length %d --characters %s' % (self.k, self.domain, self.username, self.pin, self.length, self.characters)).split(' '))
        results2 = cli.main(
            ('update %s --domain %s --username %s --pin %s --length %d --characters %s' % (self.k, self.domain, self.username, self.pin, self.length, self.characters)).split(' '))
        parts = results2.split('\n')
        self.assertEqual(self.length, len(parts[2]))
        for c in parts[2]:
            self.assertIn(c, self.characters)
        self.assertNotEqual(results1, results2)

    def test_update_provide_password(self):
        results1 = cli.main(
            ('add %s --domain %s --username %s --pin %s --password %s' % (self.k, self.domain, self.username, self.pin, self.password)).split(' '))
        results2 = cli.main(
            ('update %s --domain %s --username %s --pin %s --password %s' % (self.k, self.domain, self.username, self.pin, self.password + '2')).split(' '))
        parts = results2.split('\n')
        self.assertEqual(self.password + '2', parts[2])
        self.assertNotEqual(results1, results2)

    def test_delete(self):
        cli.main(
            ('add %s --domain %s --username %s --pin %s --password %s' % (self.k, self.domain, self.username, self.pin, self.password)).split(' '))
        cli.main(
            ('delete %s --domain %s --username %s' % (self.k, self.domain, self.username)).split(' '))
        results = cli.main(
            ('find %s --domain %s --username %s --pin %s' % (self.k, self.domain, self.username, self.pin)).split(' '))
        self.assertEqual(results, '')

    def test_delete_not_fuzzy(self):
        # delete requires an exact domain match — a prefix must not delete
        cli.main(
            ('add %s --domain %s --username %s --pin %s --password %s' % (self.k, self.domain, self.username, self.pin, self.password)).split(' '))
        cli.main(
            ('delete %s --domain %s --username %s' % (self.k, self.domain[0:2], self.username)).split(' '))
        results = cli.main(
            ('find %s --domain %s --username %s --pin %s' % (self.k, self.domain, self.username, self.pin)).split(' '))
        self.assertEqual(results, '\n'.join([self.domain, self.username, self.password]))
class RRInstance(object):
    """Drives one `rr replay` gdb session inside a tmux pane (Python 2).

    The pane's output is teed through a fifo which we read with Expect;
    commands are injected with `tmux send-keys`. Relies on module globals
    `cli_args`, `pane`, `instr_to_event` and `DEBUG_COUNTER_PERIOD`.
    """

    def __init__(self, description, rr_replay, source_pane):
        self.description = description
        self.spawn_cmd = "{} replay {}".format(
            pipes.quote(cli_args.rr), pipes.quote(rr_replay))
        self.source_pane = source_pane
        self.breakpoints = {}  # break_arg -> gdb breakpoint number
        self.watches_set = 0

    def __repr__(self):
        return "RRInstance({!r})".format(self.description)

    # Runs in child process.
    def sendline(self, msg):
        # '-l' sends the text literally; ENTER submits it to gdb.
        check_call(['tmux', 'send-keys', '-t', self.pane, '-l', msg])
        check_call(['tmux', 'send-keys', '-t', self.pane, 'ENTER'])

    # Runs in child process.
    def kill(self):
        check_call(['tmux', 'kill-pane', '-t', self.pane])

    def __enter__(self):
        """Spawn the replay in a new tmux pane, wired through a fifo."""
        self.tempdir_obj = TempDir()
        tempdir = self.tempdir_obj.__enter__()
        logfile = join(tempdir, self.description + "out")
        os.mkfifo(logfile)
        # tee once into our fifo and once into a persistent log file
        bash_command = "{} 2>&1 | tee -i --output-error=warn {} | tee -i --output-error=warn {}_log.txt".format(
            self.spawn_cmd, pipes.quote(logfile), self.description)
        self.pane = check_output([
            'tmux', 'split-window', '-hdP',
            '-F', '#{pane_id}', '-t', pane,
            'bash', '-c', bash_command]).strip()
        self.proc = Expect(os.open(logfile, os.O_RDONLY | os.O_NONBLOCK), quiet=True)
        self.proc.expect("(rr) ")
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # keep the pane alive on clean exit (for inspection); kill on error
        if exc_type:
            self.kill()
        self.tempdir_obj.__exit__(exc_type, exc_value, traceback)

    def gdb(self, *args, **kwargs):
        """Send a gdb command and block until the next "(rr) " prompt.

        NOTE(review): on TimeoutExpired, `output` is returned unbound —
        this raises NameError; verify intended behaviour.
        """
        timeout = kwargs.get('timeout', None)
        cmd = " ".join(map(str, args))
        print "(rr-{}) {}".format(self.description, cmd)
        sys.stdout.flush()
        # Drain any stale pane output so expect() only sees this command's.
        while True:
            try:
                os.read(self.proc.fd, 1024)
            except OSError as e:
                if e.errno in [EAGAIN, EWOULDBLOCK]:
                    break
                else:
                    raise
        self.sendline(cmd)
        try:
            output = self.proc.expect("(rr) ", timeout=timeout)
        except TimeoutExpired:
            print self.proc.sofar
            print "EXCEPTION!"
            sys.stdout.flush()
        return output

    def quit(self):
        self.gdb("set confirm off")
        self.sendline("quit")

    def breakpoint(self, break_arg):
        result = self.gdb("break", break_arg)
        bp_num = int(re.search(r"Breakpoint ([0-9]+) at", result).group(1))
        self.breakpoints[break_arg] = bp_num

    def disable_all(self):
        self.gdb("disable")

    def enable(self, break_arg):
        self.gdb("enable", self.breakpoints[break_arg])

    def condition(self, break_arg, cond):
        self.gdb("condition", self.breakpoints[break_arg], cond)

    def condition_instr(self, break_arg, op, instr):
        # Cache the address of the guest instruction counter on first use.
        if not hasattr(self, 'instr_count_ptr'):
            self.instr_count_ptr = self.get_value(
                "&cpus->tqh_first->rr_guest_instr_count")
        self.condition(
            break_arg, "*(uint64_t *){} {} {}".format(self.instr_count_ptr, op, instr))

    def get_value(self, value_str):
        """Evaluate *value_str* in gdb and parse the unsigned result."""
        result = self.gdb("print/u", value_str)
        re_result = re.search(r"\$[0-9]+ = ([0-9]+)", result)
        if re_result:
            return long(re_result.group(1))
        else:
            print "get_value failed. result:", result
            raise RuntimeError("get_value")

    def instr_count(self):
        return self.get_value("cpus->tqh_first->rr_guest_instr_count")

    def ram_ptr(self):
        # cached host pointer to the start of guest RAM
        if not hasattr(self, '_ram_ptr'):
            self._ram_ptr = self.get_value(
                "memory_region_find(" +
                "get_system_memory(), 0x2000000, 1).mr->ram_block.host")
        return self._ram_ptr

    def crc32_ram(self, low, size):
        # XOR of per-chunk CRCs; chunked at 2GB — presumably to keep the
        # size inside a 32-bit argument, TODO confirm.
        step = 1 << 31 if size > (1 << 31) else size
        crc32s = 0
        for start in range(low, low + size, step):
            crc32s ^= self.get_value("crc32(0, {} + {}, {})".format(
                hex(self.ram_ptr()), hex(start), hex(step)))
        return crc32s

    def ram_size(self):
        if not hasattr(self, '_ram_size'):
            self._ram_size = self.get_value('ram_size')
        return self._ram_size

    def checksum(self):
        # NB: Only run when you are at a breakpoint in CPU thread!
        memory = self.crc32_ram(0, self.ram_size())
        regs = self.get_value("rr_checksum_regs()")
        return (memory, regs)

    def when(self):
        """Return the current rr event number."""
        result = self.gdb("when")
        re_result = re.search(r"Current event: ([0-9]+)", result)
        if re_result:
            return int(re_result.group(1))
        else:
            print "when failed. result:", result
            raise RuntimeError("when")

    def cont(self):
        self.gdb("continue", timeout=None)

    def reverse_cont(self):
        self.gdb("reverse-continue", timeout=None)

    def run_event(self, event):
        self.gdb("run", event, timeout=None)

    # x86 debug registers can only watch 4 locations of 8 bytes.
    # we need to make sure to enforce that.
    # returns true if can set more watchpoints. false if we're full up.
    # NOTE(review): the comment above is stale — this method returns None.
    def watch(self, addr, size):
        assert size in [1, 2, 4, 8]
        bits = size * 8
        self.gdb("watch *(uint{}_t *)0x{:x}".format(bits, addr))
        self.watches_set += 1
        if self.watches_set >= 4:
            print
            print "WARNING: Too much divergence! Not watching some diverged points."
            print "(watchpoints are full...)"
            print

    # watch a location in guest ram.
    def watch_ram(self, ram_addr, size):
        self.watch(self.ram_ptr() + ram_addr, size)

    # Get as close to instr as possible.
    def goto(self, target_instr):
        print "Moving", self, "to instr", target_instr
        self.disable_all()
        current_instr = self.instr_count()
        if target_instr in instr_to_event:
            run_instr = target_instr
        else:
            # nearest recorded event at an instruction count below the target
            index = instr_to_event.keys().bisect_left(target_instr) - 1
            run_instr = instr_to_event.keys()[index]
        if current_instr > target_instr or current_instr < run_instr:
            self.run_event(instr_to_event[run_instr][self])
        # We should have now guaranteed that both will be in [run_instr, target_instr].
        # Now run them forwards to as close to target_instr as we can get.
        # debug_counter fires every 128k instrs, so move to last debug_counter
        # before desired instr count.
        run_instr = target_instr - DEBUG_COUNTER_PERIOD
        current_instr = self.instr_count()
        if current_instr < run_instr:
            print "Moving from {} to {} below {}".format(current_instr, run_instr, target_instr)
            self.enable("debug_counter")
            self.condition_instr("debug_counter", ">=", run_instr)
            self.cont()
        # unfortunately, we might have gone too far above. move back one
        # debug_counter fire if necessary.
        current_instr = self.instr_count()
        if current_instr > target_instr:
            print "Moving back to {}".format(target_instr)
            self.enable("debug_counter")
            self.condition_instr("debug_counter", "<=", target_instr)
            self.reverse_cont()
            current_instr = self.instr_count()
        if current_instr != target_instr:
            print "Moving precisely to", target_instr
            self.disable_all()
            self.enable("cpu_loop_exec_tb")
            self.condition_instr("cpu_loop_exec_tb", ">=", target_instr)
            self.cont()

    # Go from beginning of program to execution of first TB after record/replay.
    def goto_first_tb(self):
        self.disable_all()
        self.enable("rr_do_begin_record")
        self.enable("rr_do_begin_replay")
        self.cont()
        self.enable("cpu_loop_exec_tb")
        self.cont()

    def find_last_instr(self, cli_args, last_event):
        """Return the maximum instruction count to examine (CLI override or
        the count at the last executed TB of the failed replay)."""
        self.goto_first_tb()
        if cli_args.instr_max:
            instr_count_max = int(cli_args.instr_max)
        else:
            # get last instruction in failed replay
            self.run_event(last_event)
            self.disable_all()
            self.enable("cpu_loop_exec_tb")
            self.reverse_cont()
            self.reverse_cont()
            instr_count_max = self.instr_count()
            # reset replay so it is in same state as record
            self.run_event(0)
            self.goto_first_tb()
        return instr_count_max
def setUp(self):
    """Create a fresh keyfile and database via the real CLI for each test."""
    self.d = TempDir()
    self.k = os.path.join(self.d.name, 'keyfile')
    database = os.path.join(self.d.name, 'database')
    cli.main(('init %s %s' % (self.k, database)).split(' '))
    self.keyfile = Keyfile.load(self.k)
class TestDatabase(unittest.TestCase):
    """Tests for the encrypted credential database `D` over keyfile `K` and
    credential `C` (Python 2; nose-style @raises decorators). Some literals
    ('******') are redaction-masked in the source and preserved as found."""

    domain = 'domain'
    username = '******'
    plainpassword = '******'
    cipherpassword = '******'.decode('hex')
    pin = '1234'
    key = hashlib.sha256(domain + username + pin).digest()
    iv = 'a3206f4194d1d7a252a9fe24b7b063b9'.decode('hex')

    def setUp(self):
        self.d = TempDir()
        self.keyfile_path = os.path.join(self.d.name, 'keyfile')
        self.database_path = os.path.join(self.d.name, 'database')
        self.keyfile = K.create(
            self.keyfile_path, self.database_path)
        D.create(self.keyfile)

    def tearDown(self):
        self.d.dissolve()

    @raises(IOError)
    def test_create_database_exists(self):
        D.create(self.keyfile)

    # The next group checks that entering the database context validates
    # the keyfile: missing/invalid/incorrect key, iv and database path.
    @raises(AttributeError)
    def test_enter_missing_key(self):
        del self.keyfile.key
        with D(self.keyfile):
            pass

    @raises(ValueError)
    def test_enter_invalid_key(self):
        self.keyfile.key = 'not a key'
        with D(self.keyfile):
            pass

    @raises(ValueError)
    def test_enter_incorrect_key(self):
        self.keyfile.key = '1b660492d98c54412d3d91818de5a2ae0b3110850a12010768b80fb277f55aa5'.decode('hex')
        with D(self.keyfile):
            pass

    @raises(AttributeError)
    def test_enter_missing_iv(self):
        del self.keyfile.iv
        with D(self.keyfile):
            pass

    @raises(ValueError)
    def test_enter_invalid_iv(self):
        self.keyfile.iv = 'not an iv'
        with D(self.keyfile):
            pass

    @raises(ValueError)
    def test_enter_incorrect_iv(self):
        self.keyfile.iv = 'a059d464b93397a2a98e8e1f00b596c7'.decode('hex')
        with D(self.keyfile):
            pass

    @raises(AttributeError)
    def test_enter_missing_path(self):
        del self.keyfile.database_path
        with D(self.keyfile):
            pass

    @raises(IOError)
    def test_enter_unreadable_path(self):
        self.keyfile.database_path = '/nopermission'
        with D(self.keyfile):
            pass

    @raises(ValueError)
    def test_enter_not_a_database_path(self):
        self.keyfile.database_path = os.path.join(self.d.name, 'database')
        with open(self.keyfile.database_path, 'wb') as f:
            f.write(Random.new().read(10))
        with D(self.keyfile):
            pass

    def test_exit_saves_changes(self):
        c = C(self.domain, self.username, cipherpassword=self.cipherpassword, iv=self.iv)
        with D(self.keyfile) as d:
            d.add(c, self.pin)
        # reopen: the credential added above must have been persisted
        with D(self.keyfile) as d:
            self.assertEqual(
                [c], d.find(self.domain, self.username))

    def test_add(self):
        c1 = C(self.domain, self.username, cipherpassword=self.cipherpassword, iv=self.iv)
        with D(self.keyfile) as d:
            d.add(c1, self.pin)
            c2 = d.find(self.domain, self.username)[0]
            print c1
            print c2
            self.assertEqual(self.plainpassword, c2.unlock(self.pin))

    @raises(sqlite3.IntegrityError)
    def test_add_domain_username_exists(self):
        c = C(self.domain, self.username, cipherpassword=self.cipherpassword, iv=self.iv)
        with D(self.keyfile) as d:
            d.add(c, self.pin)
            d.add(c, self.pin)

    def test_find_none(self):
        with D(self.keyfile) as d:
            self.assertEqual([], d.find())

    def test_find_domain(self):
        c1 = C('d', 'u', plainpassword='******')
        c2 = C('d', 'v', plainpassword='******')
        c3 = C('e', 'u', plainpassword='******')
        with D(self.keyfile) as d:
            d.add(c1, self.pin)
            d.add(c2, self.pin)
            d.add(c3, self.pin)
            results = d.find(domain='d')
            self.assertIn(c1, results)
            self.assertIn(c2, results)
            self.assertNotIn(c3, results)

    def test_find_username(self):
        c1 = C('d', 'u', plainpassword='******')
        c2 = C('d', 'v', plainpassword='******')
        c3 = C('e', 'u', plainpassword='******')
        with D(self.keyfile) as d:
            d.add(c1, self.pin)
            d.add(c2, self.pin)
            d.add(c3, self.pin)
            results = d.find(username='******')
            self.assertIn(c1, results)
            self.assertNotIn(c2, results)
            self.assertIn(c3, results)

    # Fuzzy matching: a domain substring at the start/middle/end matches.
    def test_find_fuzzy_start(self):
        c1 = C('domain', 'u', plainpassword='******')
        c2 = C('e', 'v', plainpassword='******')
        with D(self.keyfile) as d:
            d.add(c1, self.pin)
            d.add(c2, self.pin)
            results = d.find(domain='dom')
            self.assertIn(c1, results)
            self.assertNotIn(c2, results)

    def test_find_fuzzy_middle(self):
        c1 = C('domain', 'u', plainpassword='******')
        c2 = C('e', 'v', plainpassword='******')
        with D(self.keyfile) as d:
            d.add(c1, self.pin)
            d.add(c2, self.pin)
            results = d.find(domain='mai')
            self.assertIn(c1, results)
            self.assertNotIn(c2, results)

    def test_find_fuzzy_end(self):
        c1 = C('domain', 'u', plainpassword='******')
        c2 = C('e', 'v', plainpassword='******')
        with D(self.keyfile) as d:
            d.add(c1, self.pin)
            d.add(c2, self.pin)
            results = d.find(domain='ain')
            self.assertIn(c1, results)
            self.assertNotIn(c2, results)

    def test_update(self):
        c1 = C('domain', 'u', plainpassword='******')
        c2 = C('domain', 'u', plainpassword='******')
        with D(self.keyfile) as d:
            d.add(c1, self.pin)
            d.update(c2, self.pin)
            c3 = d.find(domain='domain')[0]
            self.assertEqual(c2.plainpassword, c3.unlock(self.pin))
            self.assertNotEqual(c1.plainpassword, c3.unlock(self.pin))

    def test_delete(self):
        c = C('domain', 'u', plainpassword='******')
        with D(self.keyfile) as d:
            d.add(c, self.pin)
            d.delete(c)
            self.assertEqual([], d.find(domain='domain'))
def setUp(self):
    """Provision a throwaway working directory and a Docker adapter per test."""
    scratch = TempDir()
    self._tmpdir = scratch
    self.root_path = scratch.name
    self._adapter = PixelatedDockerAdapter()
def setUp(self):
    """Create a fresh temporary root and a Users registry rooted there."""
    scratch = TempDir()
    self._tmpdir = scratch
    self.root_path = scratch.name
    self.users = Users(self.root_path)
def setUp(self):
    """Allocate a temporary directory to act as the agent home."""
    scratch = TempDir()
    self.tempdir = scratch
    self.agent_home = scratch.name
class SearchEngineTest(unittest.TestCase): def setUp(self): self.tempdir = TempDir() self.user_home = self.tempdir.name def tearDown(self): self.tempdir.dissolve() def test_headers_encoding(self): # given se = SearchEngine(INDEX_KEY, self.user_home) headers = { 'From': '*****@*****.**', 'To': '=?utf-8?b?IsOEw7zDtiDDlsO8w6QiIDxmb2xrZXJAcGl4ZWxhdGVkLXByb2plY3Qub3Jn?=\n =?utf-8?b?PiwgRsO2bGtlciA8Zm9sa2VyQHBpeGVsYXRlZC1wcm9qZWN0Lm9yZz4=?=', 'Cc': '=?utf-8?b?IsOEw7zDtiDDlsO8w6QiIDxmb2xrZXJAcGl4ZWxhdGVkLXByb2plY3Qub3Jn?=\n =?utf-8?b?PiwgRsO2bGtlciA8Zm9sa2VyQHBpeGVsYXRlZC1wcm9qZWN0Lm9yZz4=?=', 'Subject': 'Some test mail', } # when se.index_mail(LeapMail('mailid', 'INBOX', headers=headers)) # test_helper.pixelated_mail(extra_headers=headers, chash='mailid')) result = se.search('folker') self.assertEqual((['mailid'], 1), result) def test_contents_encoding_accents(self): # given se = SearchEngine(INDEX_KEY, self.user_home) headers = { 'From': '*****@*****.**', 'To': '=?utf-8?b?IsOEw7zDtiDDlsO8w6QiIDxmb2xrZXJAcGl4ZWxhdGVkLXByb2plY3Qub3Jn?=\n =?utf-8?b?PiwgRsO2bGtlciA8Zm9sa2VyQHBpeGVsYXRlZC1wcm9qZWN0Lm9yZz4=?=', 'Cc': '=?utf-8?b?IsOEw7zDtiDDlsO8w6QiIDxmb2xrZXJAcGl4ZWxhdGVkLXByb2plY3Qub3Jn?=\n =?utf-8?b?PiwgRsO2bGtlciA8Zm9sa2VyQHBpeGVsYXRlZC1wcm9qZWN0Lm9yZz4=?=', 'Subject': 'Some test mail', } body = "When doing the search, it's not possible to find words with graphical accents, e.g.: 'coração', 'é', 'Fièvre', La Pluie d'été, 'não'." 
# when se.index_mail(LeapMail('mailid', 'INBOX', headers=headers, body=body)) # test_helper.pixelated_mail(extra_headers=headers, chash='mailid')) result = se.search(u"'coração', 'é',") self.assertEqual((['mailid'], 1), result) result = se.search(u"Fièvre") self.assertEqual((['mailid'], 1), result) result = se.search(u"été") self.assertEqual((['mailid'], 1), result) def test_contents_encoding_special_characters(self): # given se = SearchEngine(INDEX_KEY, self.user_home) headers = { 'From': '*****@*****.**', 'To': '=?utf-8?b?IsOEw7zDtiDDlsO8w6QiIDxmb2xrZXJAcGl4ZWxhdGVkLXByb2plY3Qub3Jn?=\n =?utf-8?b?PiwgRsO2bGtlciA8Zm9sa2VyQHBpeGVsYXRlZC1wcm9qZWN0Lm9yZz4=?=', 'Cc': '=?utf-8?b?IsOEw7zDtiDDlsO8w6QiIDxmb2xrZXJAcGl4ZWxhdGVkLXByb2plY3Qub3Jn?=\n =?utf-8?b?PiwgRsO2bGtlciA8Zm9sa2VyQHBpeGVsYXRlZC1wcm9qZWN0Lm9yZz4=?=', 'Subject': 'Some test mail', } body = "When doing the search, 您好 أهلا" # when se.index_mail(LeapMail('mailid', 'INBOX', headers=headers, body=body)) # test_helper.pixelated_mail(extra_headers=headers, chash='mailid')) result = se.search(u"您好") self.assertEqual((['mailid'], 1), result) result = se.search(u"أهلا") self.assertEqual((['mailid'], 1), result)
class AppTestClient(object):
    """In-process test client for the Pixelated user agent.

    Builds the full resource tree (single- or multi-user), renders Twisted
    requests against it directly (no network), and offers convenience
    wrappers for the HTTP API plus direct mail-store fixtures.
    """

    # Raw binary search-index key shared by all test accounts.
    INDEX_KEY = (
        "\xde3?\x87\xff\xd9\xd3\x14\xf0\xa7>\x1f%C{\x16.\\\xae\x8c\x13\xa7\xfb\x04\xd4]+\x8d_\xed\xd1\x8d\x0bI"
        "\x8a\x0e\xa4tm\xab\xbf\xb4\xa5\x99\x00d\xd5w\x9f\x18\xbc\x1d\xd4_W\xd2\xb6\xe8H\x83\x1b\xd8\x9d\xad"
    )
    ACCOUNT = "test"
    MAIL_ADDRESS = "*****@*****.**"

    def _initialize(self):
        # Fresh temp dir and empty account registry per client start.
        self._tmp_dir = TempDir()
        self.accounts = {}

    @defer.inlineCallbacks
    def start_client(self, mode=UserAgentMode(is_single_user=True)):
        # NOTE(review): the default UserAgentMode(...) is evaluated once at
        # import time and shared across calls — confirm that is intentional.
        self._initialize()
        self._mode = mode
        self._test_account = AppTestAccount(self.ACCOUNT, self._tmp_dir.name)
        yield self._test_account.start()
        # Caller invokes cleanup() to remove the temp dir when done.
        self.cleanup = lambda: self._tmp_dir.dissolve()
        # copy fields for single user tests
        self.soledad = self._test_account.soledad
        self.search_engine = self._test_account.search_engine
        self.keymanager = self._test_account.keymanager
        self.mail_sender = self._test_account.mail_sender
        self.mail_store = self._test_account.mail_store
        self.attachment_store = self._test_account.attachment_store
        self.draft_service = self._test_account.draft_service
        self.leap_session = self._test_account.leap_session
        self.feedback_service = self._test_account.feedback_service
        self.mail_service = self._test_account.mail_service
        self.account = self._test_account.account
        if mode.is_single_user:
            # Single-user: one session, unprotected root resource.
            self.service_factory = SingleUserServicesFactory(mode)
            services = self._test_account.services
            self.service_factory.add_session("someuserid", services)
            self.resource = RootResource(self.service_factory)
            self.resource.initialize()
        else:
            # Multi-user: stub factory over self.accounts, root wrapped in
            # auth protection backed by a stubbed SRP checker.
            self.service_factory = StubServicesFactory(self.accounts, mode)
            provider = mock()
            provider.config = LeapConfig(self._tmp_dir.name)
            self.resource = set_up_protected_resources(
                RootResource(self.service_factory), provider, self.service_factory, checker=StubSRPChecker(provider)
            )

    @defer.inlineCallbacks
    def create_user(self, account_name):
        """Create and start an extra test account (multi-user mode only)."""
        if self._mode.is_single_user:
            raise Exception("Not supported in single user mode")
        account = AppTestAccount(account_name, self._tmp_dir.name)
        yield account.start()
        self.accounts[account_name] = account

    def _render(self, request, as_json=True):
        """Render *request* against the resource tree.

        Returns ``(result, request)`` where result is either the decoded
        body (synchronous render) or a Deferred firing with it once the
        request finishes (NOT_DONE_YET path).
        """
        def get_str(_str):
            # Decode JSON bodies unless the caller asked for raw text.
            return json.loads(_str) if as_json else _str

        def get_request_written_data(_=None):
            written_data = request.getWrittenData()
            if written_data:
                return get_str(written_data)

        resource = getChildForRequest(self.resource, request)
        result = resource.render(request)
        if isinstance(result, basestring):
            # Synchronous render: body returned directly.
            return get_str(result), request
        # result is NOT_DONE_YET
        d = succeed(request) if request.finished else request.notifyFinish()
        d.addCallback(get_request_written_data)
        return d, request

    def listenTCP(self, port=4567, host="127.0.0.1"):
        """Serve the resource tree over real TCP (for out-of-process tests)."""
        reactor.listenTCP(port, PixelatedSite(self.resource), interface=host)

    def run_on_a_thread(self, logfile="/tmp/app_test_client.log", port=4567, host="127.0.0.1"):
        """Run the server in a child process; returns a terminator callable.

        NOTE(review): *logfile* is currently unused by the body.
        """
        def _start():
            self.listenTCP(port, host)
            reactor.run()
        process = multiprocessing.Process(target=_start)
        process.start()
        time.sleep(1)  # give the child reactor time to come up
        return lambda: process.terminate()

    def stop(self):
        reactor.stop()

    def get(self, path, get_args="", as_json=True, ajax=True, csrf="token"):
        """Render a GET request; *get_args* becomes request.args verbatim."""
        request = request_mock(path, ajax=ajax, csrf=csrf)
        request.args = get_args
        return self._render(request, as_json)

    def post(self, path, body="", headers=None, ajax=True, csrf="token"):
        """Render a POST request; defaults to a JSON content type."""
        headers = headers or {"Content-Type": "application/json"}
        request = request_mock(path=path, method="POST", body=body, headers=headers, ajax=ajax, csrf=csrf)
        return self._render(request)

    def put(self, path, body, ajax=True, csrf="token"):
        """Render a PUT request with a JSON content type."""
        request = request_mock(
            path=path, method="PUT", body=body, headers={"Content-Type": ["application/json"]}, ajax=ajax, csrf=csrf
        )
        return self._render(request)

    def delete(self, path, body="", ajax=True, csrf="token"):
        """Render a DELETE request with a JSON content type."""
        request = request_mock(
            path=path, body=body, headers={"Content-Type": ["application/json"]}, method="DELETE", ajax=ajax, csrf=csrf
        )
        return self._render(request)

    @defer.inlineCallbacks
    def add_mail_to_inbox(self, input_mail):
        """Store *input_mail* directly into the primary account's INBOX."""
        mail = yield self.mail_store.add_mail("INBOX", input_mail.raw)
        defer.returnValue(mail)

    def account_for(self, username):
        return self.accounts[username]

    def add_mail_to_user_inbox(self, input_mail, username):
        """Store *input_mail* into the INBOX of a named multi-user account."""
        return self.account_for(username).mail_store.add_mail("INBOX", input_mail.raw)

    @defer.inlineCallbacks
    def add_multiple_to_mailbox(
        self, num, mailbox="", flags=[], tags=[], to="*****@*****.**", cc="*****@*****.**", bcc="*****@*****.**"
    ):
        """Create *num* randomized mails in *mailbox*, applying flags/tags.

        NOTE(review): mutable default arguments (flags=[], tags=[]) — the
        body never mutates them, but consider None-defaults for safety.
        """
        mails = []
        yield self.mail_store.add_mailbox(mailbox)
        for _ in range(num):
            builder = MailBuilder().with_status(flags).with_tags(tags).with_to(to).with_cc(cc).with_bcc(bcc)
            builder.with_body(str(random.random()))  # unique body per mail
            input_mail = builder.build_input_mail()
            mail = yield self.mail_store.add_mail(mailbox, input_mail.raw)
            if tags:
                mail.tags |= set(tags)
            if flags:
                for flag in flags:
                    mail.flags.add(flag)
            if tags or flags:
                # Persist the tag/flag changes made after storage.
                yield self.mail_store.update_mail(mail)
            mails.append(mail)
        defer.returnValue(mails)

    def _create_mail_sender(self):
        """Return a mock sender whose sendmail echoes the mail back as a Deferred."""
        mail_sender = Mock()
        mail_sender.sendmail.side_effect = lambda mail: succeed(mail)
        return mail_sender

    def _generate_soledad_test_folder_name(self, soledad_test_folder="/tmp/soledad-test/test"):
        # Unique per-run folder to isolate soledad state between tests.
        return os.path.join(soledad_test_folder, str(uuid.uuid4()))

    def get_mails_by_tag(self, tag, page=1, window=100):
        """Search mails carrying *tag* via the HTTP search endpoint."""
        tags = "tag:%s" % tag
        return self.search(tags, page, window)

    @defer.inlineCallbacks
    def search(self, query, page=1, window=100):
        """Run a paged search through GET /mails; yields ResponseMail wrappers."""
        res, _ = self.get("/mails", {"q": [query], "w": [str(window)], "p": [str(page)]})
        res = yield res
        defer.returnValue([ResponseMail(m) for m in res["mails"]])

    @defer.inlineCallbacks
    def get_mails_by_mailbox_name(self, mbox_name):
        """Fetch all mails of a mailbox directly from the store (no HTTP)."""
        mail_ids = yield self.mail_store.get_mailbox_mail_ids(mbox_name)
        mails = yield self.mail_store.get_mails(mail_ids)
        defer.returnValue(mails)

    @defer.inlineCallbacks
    def get_attachment(self, ident, encoding, filename=None, content_type=None, ajax=True, csrf="token"):
        """GET an attachment by ident; optional filename/content_type params."""
        params = {"encoding": [encoding]}
        if filename:
            params["filename"] = [filename]
        if content_type:
            params["content_type"] = [content_type]
        deferred_result, req = self.get("/attachment/%s" % ident, params, as_json=False, ajax=ajax, csrf=csrf)
        res = yield deferred_result
        defer.returnValue((res, req))

    @defer.inlineCallbacks
    def post_attachment(self, data, headers):
        """Upload attachment bytes through POST /attachment."""
        deferred_result, req = self.post("/attachment", body=data, headers=headers)
        res = yield deferred_result
        defer.returnValue((res, req))

    def put_mail(self, data):
        """Save a draft via PUT /mails; returns (result, request)."""
        res, req = self.put("/mails", data)
        return res, req

    def post_tags(self, mail_ident, tags_json):
        res, req = self.post("/mail/%s/tags" % mail_ident, tags_json)
        return res

    def get_tags(self, **kwargs):
        res, req = self.get("/tags", kwargs)
        return res

    def get_mail(self, mail_ident):
        res, req = self.get("/mail/%s" % mail_ident)
        return res

    def delete_mail(self, mail_ident):
        res, req = self.delete("/mail/%s" % mail_ident)
        return res

    def delete_mails(self, idents):
        res, req = self.post("/mails/delete", json.dumps({"idents": idents}))
        return res

    def mark_many_as_unread(self, idents):
        res, req = self.post("/mails/unread", json.dumps({"idents": idents}))
        return res

    def mark_many_as_read(self, idents):
        res, req = self.post("/mails/read", json.dumps({"idents": idents}))
        return res

    def get_contacts(self, query):
        res, req = self.get("/contacts", get_args={"q": query})
        return res
def setUp(self):
    """Allocate a temporary directory to serve as the user home."""
    scratch = TempDir()
    self.tempdir = scratch
    self.user_home = scratch.name
class SmokeTest(unittest.TestCase):
    """End-to-end smoke tests: runs the dispatcher manager and proxy on
    background threads and talks to them over real (TLS) HTTP."""

    # NOTE(review): these slot names mirror Server's attributes; __slots__ on
    # the TestCase itself looks misplaced — confirm it was meant for Server.
    __slots__ = ('_run_method', '_shutdown_method', '_thread_name', '_thread')

    class Server(object):
        """Context manager that runs a serve_forever-style server on a
        daemon thread and shuts it down (killing child processes) on exit."""

        def __init__(self, run_method, shutdown_method, thread_name=None):
            self._run_method = run_method
            self._shutdown_method = shutdown_method
            self._thread_name = thread_name
            self._thread = None

        def _start_server(self):
            self._thread = threading.Thread(target=self._run_method)
            self._thread.setDaemon(True)  # do not block interpreter exit
            if self._thread_name:
                self._thread.setName(self._thread_name)
            self._thread.start()

        def __enter__(self):
            self._start_server()
            time.sleep(0.3)  # let server start
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            self._shutdown_method()
            self._thread.join()
            self._kill_subprocesses()

        def _kill_subprocesses(self):
            # Reap any child processes (e.g. spawned agents) left behind.
            for child in psutil.Process(os.getpid()).children():
                try:
                    p = psutil.Process(child.pid)
                    p.kill()
                except psutil.Error:
                    pass

    def setUp(self):
        self._tmpdir = TempDir()
        # Session that only accepts TLSv1+ connections.
        self.ssl_request = requests.Session()
        self.ssl_request.mount('https://', EnforceTLSv1Adapter())

    def tearDown(self):
        self._tmpdir.dissolve()

    def _dispatcher_manager(self):
        """Build a Server wrapping a DispatcherManager driving fake_mailpile.py."""
        fake_mailpile = os.path.join(os.path.dirname(__file__), 'fake_mailpile.py')
        ssl_config = SSLConfig(certfile(), keyfile())
        provider_ca = None
        server = DispatcherManager(self._tmpdir.name, fake_mailpile, ssl_config, 'leap provider hostname', provider_ca, mailpile_virtualenv=INHERIT)
        return SmokeTest.Server(server.serve_forever, server.shutdown, thread_name='PixelatedServerManager')

    def _dispatcher_proxy(self):
        """Build a Server wrapping a DispatcherProxy in front of the manager."""
        dispatcher = DispatcherProxy(PixelatedDispatcherClient('localhost', DEFAULT_PORT, cacert=cafile(), assert_hostname=False), port=12345, certfile=certfile(), keyfile=keyfile())
        return SmokeTest.Server(dispatcher.serve_forever, dispatcher.shutdown, thread_name='PixelatedDispatcherProxy')

    def _method(self, method, url, form_data=None, json_data=None, timeout=2.0):
        """Send a request: JSON body when json_data given, otherwise a form
        post carrying the XSRF token both as cookie and field."""
        if json_data:
            headers = {'content-type': 'application/json'}
            data = json.dumps(json_data)
            cookies = None
        else:
            cookies = {'_xsrf': '2|7586b241|47c876d965112a2f547c63c95cbc44b1|1402910163'}
            headers = None
            data = form_data.copy()
            data['_xsrf'] = '2|7586b241|47c876d965112a2f547c63c95cbc44b1|1402910163'
        return method(url, data=data, headers=headers, cookies=cookies, timeout=timeout, verify=cafile())

    def get(self, url):
        return self.ssl_request.get(url, verify=cafile())

    def put(self, url, form_data=None, json_data=None):
        return self._method(self.ssl_request.put, url, form_data=form_data, json_data=json_data)

    def post(self, url, form_data=None, json_data=None):
        return self._method(self.ssl_request.post, url, form_data=form_data, json_data=json_data)

    def test_dispatcher_run(self):
        """Full agent lifecycle through the manager: add, list, start, stop."""
        with self._dispatcher_manager():
            self.assertSuccess(
                self.post('https://localhost:4443/agents', json_data={'name': 'test', 'password': '******'}))
            self.assertSuccess(self.get('https://localhost:4443/agents'), json_body={
                'agents': [{'name': 'test', 'state': 'stopped', 'uri': 'http://localhost:4443/agents/test'}]})
            self.assertSuccess(
                self.put('https://localhost:4443/agents/test/state', json_data={'state': 'running'}))
            self.assertSuccess(self.get('https://localhost:4443/agents/test/runtime'),
                               json_body={'state': 'running', 'port': 5000})
            time.sleep(2)  # let mailpile start
            self.assertSuccess(self.get('http://localhost:5000/'))
            self.assertSuccess(
                self.put('https://localhost:4443/agents/test/state', json_data={'state': 'stopped'}))

    def test_dispatcher_starts(self):
        """The proxy alone serves its login page."""
        with self._dispatcher_proxy():
            self.assertSuccess(self.get('https://localhost:12345/auth/login'))

    def test_server_dispatcher_combination(self):
        """Manager + proxy together: create, start, login through the proxy."""
        with self._dispatcher_manager():
            with self._dispatcher_proxy():
                # add user
                self.assertSuccess(
                    self.post('https://localhost:4443/agents', json_data={'name': 'test', 'password': '******'}))
                # try to login with agent down
                # self.assertError(302, self.post('https://localhost:12345/auth/login',
                #                 form_data={'username': '******', 'password': '******'}))
                # start agent
                self.assertSuccess(
                    self.put('https://localhost:4443/agents/test/state', json_data={'state': 'running'}))
                # let mailpile start
                time.sleep(1)
                self.assertMemoryUsage(
                    self.get('https://localhost:4443/stats/memory_usage'))
                try:
                    # try to login with agent up
                    self.assertSuccess(self.post('https://localhost:12345/auth/login',
                                                 form_data={'username': '******', 'password': '******'}),
                                       body='Hello World!')
                finally:
                    # shutdown mailpile
                    self.put('https://localhost:4443/agents/test/state', json_data={'state': 'stopped'})

    def assertSuccess(self, response, body=None, json_body=None):
        """Assert a 2xx response; optionally check raw or JSON body."""
        status = response.status_code
        self.assertTrue(200 <= status < 300, msg='%d: %s' % (response.status_code, response.reason))
        if body:
            self.assertEqual(body, response.content)
        if json_body:
            self.assertEqual(json_body, response.json())

    def assertError(self, error_code, response):
        self.assertEqual(error_code, response.status_code,
                         'Expected status code %d but got %d' % (error_code, response.status_code))

    def assertMemoryUsage(self, response):
        """Assert the stats endpoint reports exactly one agent."""
        self.assertSuccess(response)
        usage = response.json()
        self.assertEqual(1, len(usage['agents']))
class DockerProviderTest(unittest.TestCase):
    """Unit tests for DockerProvider with the docker client fully mocked.

    Container listings are driven through ``client.containers.side_effect``
    lists, so the order of provider calls in each test is significant.
    NOTE(review): DockerProvider is constructed with 4 args in some tests and
    5 in others — confirm which signature is current.
    """

    def setUp(self):
        self._tmpdir = TempDir()
        self.root_path = self._tmpdir.name
        self._adapter = PixelatedDockerAdapter()

    def tearDown(self):
        self._tmpdir.dissolve()

    def test_constructor_expects_docker_url(self):
        # Construction alone must not raise.
        DockerProvider(self.root_path, self._adapter, 'leap_provider', 'some docker url')

    @patch('pixelated.provider.docker.docker.Client')
    def test_initialize_builds_docker_image(self, docker_mock):
        """With no image present, initialize() builds from the bundled Dockerfile."""
        # given
        client = docker_mock.return_value
        client.images.return_value = []
        dockerfile = pkg_resources.resource_string('pixelated.resources', 'Dockerfile.pixelated')
        # when
        DockerProvider(self.root_path, self._adapter, 'leap_provider', 'some leap ca', 'some docker url').initialize()
        # then
        docker_mock.assert_called_once_with(base_url="some docker url")
        client.build.assert_called_once_with(path=None, fileobj=StringIOMatcher(dockerfile), tag='pixelated:latest')

    @patch('pixelated.provider.docker.docker.Client')
    def test_initialize_skips_image_build_if_available(self, docker_mock):
        """An existing pixelated:latest image suppresses the build."""
        # given
        client = docker_mock.return_value
        client.images.return_value = [{'Created': 1404833111, 'VirtualSize': 297017244, 'ParentId': '57885511c8444c2b89743bef8b89eccb65f302b2a95daa95dfcc9b972807b6db', 'RepoTags': ['pixelated:latest'], 'Id': 'b4f10a2395ab8dfc5e1c0fae26fa56c7f5d2541debe54263105fe5af1d263189', 'Size': 181956643}]
        # when
        DockerProvider(self.root_path, self._adapter, 'leap_provider', 'some docker url').initialize()
        # then
        self.assertFalse(client.build.called)

    @patch('pixelated.provider.docker.docker.Client')
    def test_reports_initializing_while_initialize_is_running(self, docker_mock):
        """initializing stays True during a slow build and False afterwards."""
        # given
        client = docker_mock.return_value
        client.images.return_value = []

        def build(path, fileobj, tag):
            sleep(0.2)  # simulate a slow image build
            return []
        client.build.side_effect = build
        provider = DockerProvider(self.root_path, self._adapter, 'some provider', 'some provider ca', 'some docker url')
        self.assertTrue(provider.initializing)
        # when
        t = Thread(target=provider.initialize)  # move to thread so that initializing behaviour is observable
        t.start()
        # then
        sleep(0.1)
        self.assertTrue(provider.initializing)
        t.join()
        self.assertFalse(provider.initializing)

    @patch('pixelated.provider.docker.LeapProvider')
    @patch('pixelated.provider.docker.LeapSecureRemotePassword')
    def test_throws_initializing_exception_while_initializing(self, leap_provider_mock, leap_srp_mock):
        """Every public operation is rejected before initialize() completes."""
        # given
        provider = DockerProvider(self.root_path, self._adapter, 'provider url', 'provider ca', 'some docker url')
        # when/then
        self.assertRaises(ProviderInitializingException, provider.start, 'test')
        self.assertRaises(ProviderInitializingException, provider.add, 'test', 'password')
        self.assertRaises(ProviderInitializingException, provider.remove, 'test')
        self.assertRaises(ProviderInitializingException, provider.list)
        self.assertRaises(ProviderInitializingException, provider.list_running)
        self.assertRaises(ProviderInitializingException, provider.stop, 'test')
        self.assertRaises(ProviderInitializingException, provider.status, 'test')
        self.assertRaises(ProviderInitializingException, provider.authenticate, 'test', 'password')
        self.assertRaises(ProviderInitializingException, provider.memory_usage)

    def test_add(self):
        """add() creates the instance folder, data dir and config file on disk."""
        self._create_initialized_provider(self.root_path, self._adapter, 'some docker url').add('test', 'password')
        instance_path = join(self.root_path, 'test')
        data_dir = join(instance_path, 'data')
        cfg_file = join(instance_path, BaseProvider.CFG_FILE_NAME)
        self.assertTrue(isdir(instance_path), 'No folder for user has been created')
        self.assertTrue(isdir(data_dir), 'No folder for pixelated has been created')
        self.assertTrue(isfile(cfg_file), 'No config file had been created')

    @patch('pixelated.provider.docker.docker.Client')
    def test_that_non_existing_instance_cannot_be_started(self, docker_mock):
        provider = self._create_initialized_provider(self.root_path, self._adapter, 'some docker url')
        self.assertRaises(InstanceNotFoundError, provider.start, 'test')

    @patch('pixelated.provider.docker.docker.Client')
    def test_that_instance_can_be_started(self, docker_mock):
        """start() runs a prepare container and then the agent container,
        binding the instance data dir to /mnt/user."""
        client = docker_mock.return_value
        provider = self._create_initialized_provider(self.root_path, self._adapter, 'some docker url')
        prepare_pixelated_container = MagicMock()
        container = MagicMock()
        # First create_container call is the prepare step, second the agent.
        client.create_container.side_effect = [prepare_pixelated_container, container]
        client.wait.return_value = 0
        provider.add('test', 'password')
        provider.start('test')
        client.create_container.assert_any_call('pixelated', '/bin/bash -l -c "/usr/bin/pixelated-user-agent --host 0.0.0.0 --port 4567 --dispatcher /mnt/user/credentials-fifo"', name='test', volumes=['/mnt/user'], ports=[4567], environment={'DISPATCHER_LOGOUT_URL': '/auth/logout'})
        client.create_container.assert_any_call('pixelated', '/bin/true', name='pixelated_prepare', volumes=['/mnt/user'], environment={'DISPATCHER_LOGOUT_URL': '/auth/logout'})
        data_path = join(self.root_path, 'test', 'data')
        client.start.assert_any_call(container, binds={data_path: {'bind': '/mnt/user', 'ro': False}}, port_bindings={4567: 5000})
        client.start.assert_any_call(prepare_pixelated_container, binds={data_path: {'bind': '/mnt/user', 'ro': False}})

    @patch('pixelated.provider.docker.docker.Client')
    def test_that_existing_container_gets_reused(self, docker_mock):
        """A stopped container named after the user is reused, not rebuilt."""
        client = docker_mock.return_value
        client.containers.side_effect = [[], [{u'Status': u'Exited (-1) About an hour ago', u'Created': 1405332375, u'Image': u'pixelated:latest', u'Ports': [], u'Command': u'/bin/bash -l -c "/usr/bin/pixelated-user-agent --dispatcher"', u'Names': [u'/test'], u'Id': u'adfd4633fc42734665d7d98076b19b5f439648678b3b76db891f9d5072af50b6'}]]
        provider = self._create_initialized_provider(self.root_path, self._adapter, 'some docker url')
        container = MagicMock()
        client.create_container.return_value = container
        provider.add('test', 'password')
        provider.start('test')
        client.containers.assert_called_with(all=True)
        self.assertFalse(client.build.called)

    @patch('pixelated.provider.docker.docker.Client')
    def test_running_containers_empty_if_none_started(self, docker_mock):
        client = docker_mock.return_value
        client.containers.return_value = []
        provider = self._create_initialized_provider(self.root_path, self._adapter, 'some docker url')
        running = provider.list_running()
        self.assertEqual([], running)

    @patch('pixelated.provider.docker.docker.Client')
    def test_running_returns_running_container(self, docker_mock):
        client = docker_mock.return_value
        client.containers.side_effect = [[], [], [{u'Status': u'Up 20 seconds', u'Created': 1404904929, u'Image': u'pixelated:latest', u'Ports': [], u'Command': u'sleep 100', u'Names': [u'/test'], u'Id': u'f59ee32d2022b1ab17eef608d2cd617b7c086492164b8c411f1cbcf9bfef0d87'}]]
        client.wait.return_value = 0
        provider = self._create_initialized_provider(self.root_path, self._adapter, 'some docker url')
        provider.add('test', 'password')
        provider.start('test')
        running = provider.list_running()
        self.assertEqual(['test'], running)

    @patch('pixelated.provider.docker.docker.Client')
    def test_a_container_cannot_be_started_twice(self, docker_mock):
        client = docker_mock.return_value
        client.containers.side_effect = [[], [], [{u'Status': u'Up 20 seconds', u'Created': 1404904929, u'Image': u'pixelated:latest', u'Ports': [], u'Command': u'sleep 100', u'Names': [u'/test'], u'Id': u'f59ee32d2022b1ab17eef608d2cd617b7c086492164b8c411f1cbcf9bfef0d87'}]]
        client.wait.return_value = 0
        provider = self._create_initialized_provider(self.root_path, self._adapter, 'some docker url')
        provider.add('test', 'password')
        provider.start('test')
        self.assertRaises(InstanceAlreadyRunningError, provider.start, 'test')

    @patch('pixelated.provider.docker.docker.Client')
    def test_stopping_not_running_container_raises_value_error(self, docker_mock):
        client = docker_mock.return_value
        client.containers.return_value = []
        provider = self._create_initialized_provider(self.root_path, self._adapter, 'some docker url')
        provider.add('test', 'password')
        self.assertRaises(InstanceNotRunningError, provider.stop, 'test')

    @patch('pixelated.provider.docker.docker.Client')
    def test_stop_running_container(self, docker_mock):
        """stop() issues a graceful docker stop and frees the mapped port."""
        # given
        client = docker_mock.return_value
        container = {u'Status': u'Up 20 seconds', u'Created': 1404904929, u'Image': u'pixelated:latest', u'Ports': [{u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 5000, u'PrivatePort': 4567}], u'Command': u'sleep 100', u'Names': [u'/test'], u'Id': u'f59ee32d2022b1ab17eef608d2cd617b7c086492164b8c411f1cbcf9bfef0d87'}
        client.containers.side_effect = [[], [], [container], [container], [container]]
        client.wait.return_value = 0
        provider = self._create_initialized_provider(self.root_path, self._adapter, 'some docker url')
        provider.add('test', 'password')
        provider.start('test')
        # when
        provider.stop('test')
        # then
        client.stop.assert_called_once_with(container, timeout=10)
        self.assertFalse(5000 in provider._used_ports())

    @patch('pixelated.provider.docker.docker.Client')
    def test_stop_running_container_calls_kill_if_stop_times_out(self, docker_mock):
        """A timed-out graceful stop escalates to docker kill."""
        # given
        client = docker_mock.return_value
        container = {u'Status': u'Up 20 seconds', u'Created': 1404904929, u'Image': u'pixelated:latest', u'Ports': [{u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 5000, u'PrivatePort': 4567}], u'Command': u'sleep 100', u'Names': [u'/test'], u'Id': u'f59ee32d2022b1ab17eef608d2cd617b7c086492164b8c411f1cbcf9bfef0d87'}
        client.containers.side_effect = [[], [], [container], [container], [container]]
        client.wait.return_value = 0
        client.stop.side_effect = requests.exceptions.Timeout
        provider = self._create_initialized_provider(self.root_path, self._adapter, 'some docker url')
        provider.add('test', 'password')
        provider.start('test')
        # when
        provider.stop('test')
        # then
        client.stop.assert_called_once_with(container, timeout=10)
        client.kill.assert_called_once_with(container)

    @patch('pixelated.provider.docker.docker.Client')
    def test_status_stopped(self, docker_mock):
        provider = self._create_initialized_provider(self.root_path, self._adapter, 'some docker url')
        provider.add('test', 'password')
        self.assertEqual({'state': 'stopped'}, provider.status('test'))

    @patch('pixelated.provider.docker.docker.Client')
    def test_status_running(self, docker_mock):
        """status() reports the public (host-side) port, not the private one."""
        client = docker_mock.return_value
        container = {u'Status': u'Up 20 seconds', u'Created': 1404904929, u'Image': u'pixelated:latest', u'Ports': [{u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 5000, u'PrivatePort': 33144}], u'Command': u'sleep 100', u'Names': [u'/test'], u'Id': u'f59ee32d2022b1ab17eef608d2cd617b7c086492164b8c411f1cbcf9bfef0d87'}
        client.containers.side_effect = [[], [], [container], [container]]
        client.wait.return_value = 0
        provider = self._create_initialized_provider(self.root_path, self._adapter, 'some docker url')
        provider.add('test', 'password')
        provider.start('test')
        self.assertEqual({'state': 'running', 'port': 5000}, provider.status('test'))

    def test_empty_list(self):
        provider = self._create_initialized_provider(self.root_path, self._adapter, 'some docker url')
        self.assertEqual([], provider.list())

    def test_list(self):
        provider = self._create_initialized_provider(self.root_path, self._adapter, 'some docker url')
        provider.add('test', 'password')
        self.assertEqual(['test'], provider.list())

    @patch('pixelated.provider.docker.Process')
    @patch('pixelated.provider.docker.docker.Client')
    def test_memory_usage(self, docker_mock, process_mock):
        """memory_usage() aggregates RSS per agent from the container PIDs."""
        # given: a running container plus the docker inspect payload for it
        container = {u'Status': u'Up 20 seconds', u'Created': 1404904929, u'Image': u'pixelated:latest', u'Ports': [{u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 5000, u'PrivatePort': 33144}], u'Command': u'sleep 100', u'Names': [u'/test'], u'Id': u'f59ee32d2022b1ab17eef608d2cd617b7c086492164b8c411f1cbcf9bfef0d87'}
        info = {u'HostsPath': u'/var/lib/docker/containers/f2cdb04277e9e056c610240edffe8ff94ae272e462312c270e5300975d60af89/hosts', u'Created': u'2014-07-14T13:17:46.17558664Z', u'Image': u'f63df19194389be6481a174b36d291c483c8982d5c07485baa71a46b7f6582c8', u'Args': [], u'Driver': u'aufs', u'HostConfig': {u'PortBindings': {u'4567/tcp': [{u'HostPort': u'5000', u'HostIp': u'0.0.0.0'}]}, u'NetworkMode': u'', u'Links': None, u'LxcConf': None, u'ContainerIDFile': u'', u'Binds': [u'/tmp/multipile/folker:/mnt/user:rw'], u'PublishAllPorts': False, u'Dns': None, u'DnsSearch': None, u'Privileged': False, u'VolumesFrom': None}, u'MountLabel': u'', u'VolumesRW': {u'/mnt/user': True}, u'State': {u'Pid': 3250, u'Paused': False, u'Running': True, u'FinishedAt': u'0001-01-01T00:00:00Z', u'StartedAt': u'2014-07-14T13:17:46.601922899Z', u'ExitCode': 0}, u'ExecDriver': u'native-0.2', u'ResolvConfPath': u'/etc/resolv.conf', u'Volumes': {u'/mnt/user': u'/tmp/multipile/folker'}, u'Path': u'/bin/bash -l -c "/usr/bin/pixelated-user-agent --dispatcher"', u'HostnamePath': u'/var/lib/docker/containers/f2cdb04277e9e056c610240edffe8ff94ae272e462312c270e5300975d60af89/hostname', u'ProcessLabel': u'', u'Config': {u'MemorySwap': 0, u'Hostname': u'f2cdb04277e9', u'Entrypoint': None, u'PortSpecs': None, u'Memory': 0, u'OnBuild': None, u'OpenStdin': False, u'Cpuset': u'', u'Env': [u'HOME=/', u'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'], u'User': u'', u'CpuShares': 0, u'AttachStdout': True, u'NetworkDisabled': False, u'WorkingDir': u'', u'Cmd': [u'/bin/bash -l -c "/usr/bin/pixelated-user-agent --dispatcher"'], u'StdinOnce': False, u'AttachStdin': False, u'Volumes': {u'/mnt/user': {}}, u'Tty': False, u'AttachStderr': True, u'Domainname': u'', u'Image': u'pixelated', u'ExposedPorts': {u'4567/tcp': {}}}, u'Id': u'f2cdb04277e9e056c610240edffe8ff94ae272e462312c270e5300975d60af89', u'NetworkSettings': {u'Bridge': u'docker0', u'PortMapping': None, u'Gateway': u'172.17.42.1', u'IPPrefixLen': 16, u'IPAddress': u'172.17.0.14', u'Ports': {u'4567/tcp': [{u'HostPort': u'5000', u'HostIp': u'0.0.0.0'}]}}, u'Name': u'/folker'}
        client = docker_mock.return_value
        client.containers.return_value = [container]
        client.inspect_container.return_value = info
        psutil_mock = process_mock.return_value
        psutil_mock.memory_info.return_value = pmem(1024, 2048)  # rss=1024
        provider = self._create_initialized_provider(self.root_path, self._adapter, 'some docker url')
        provider.add('test', 'password')
        # when
        usage = provider.memory_usage()
        # then
        self.assertEqual({'total_usage': 1024, 'average_usage': 1024, 'agents': [
            {'name': 'test', 'memory_usage': 1024}
        ]}, usage)

    def test_that_existing_agents_are_autodiscovered(self):
        """A pre-existing instance folder shows up in list() without add()."""
        agent = os.path.join(self.root_path, 'test')
        os.mkdir(agent)
        provider = self._create_initialized_provider(self.root_path, self._adapter, 'some docker url')
        self.assertEqual(['test'], provider.list())

    def test_authenticate(self):
        provider = self._create_initialized_provider(self.root_path, self._adapter, 'some docker url')
        provider.add('test', 'password')
        self.assertTrue(provider.authenticate('test', 'password'))
        self.assertFalse(provider.authenticate('test', 'something else'))

    def test_remove_error_if_not_exist(self):
        provider = self._create_initialized_provider(self.root_path, self._adapter, 'some docker url')
        self.assertRaises(ValueError, provider.remove, 'does_not_exist')

    @patch('pixelated.provider.docker.docker.Client')
    def test_remove(self, docker_mock):
        """remove() deletes the on-disk folder and unlists the instance."""
        # given
        client = docker_mock.return_value
        client.containers.return_value = []
        provider = self._create_initialized_provider(self.root_path, self._adapter, 'some docker url')
        provider.add('test', 'password')
        # when
        provider.remove('test')
        # then
        self.assertFalse(exists(join(self.root_path, 'test')))
        self.assertFalse('test' in provider.list())

    @patch('pixelated.provider.docker.docker.Client')
    def test_cannot_remove_while_running(self, docker_mock):
        # given
        client = docker_mock.return_value
        container = {u'Status': u'Up 20 seconds', u'Created': 1404904929, u'Image': u'pixelated:latest', u'Ports': [{u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 5000, u'PrivatePort': 4567}], u'Command': u'sleep 100', u'Names': [u'/test'], u'Id': u'f59ee32d2022b1ab17eef608d2cd617b7c086492164b8c411f1cbcf9bfef0d87'}
        client.containers.side_effect = [[], [], [container]]
        client.wait.return_value = 0
        provider = self._create_initialized_provider(self.root_path, self._adapter, 'some docker url')
        provider.add('test', 'password')
        provider.start('test')
        # when/then
        self.assertRaises(ValueError, provider.remove, 'test')

    @patch('pixelated.provider.docker.TempDir')
    @patch('pixelated.provider.docker.pkg_resources')
    @patch('pixelated.provider.docker.docker.Client')
    def test_use_build_script_instead_of_docker_file_if_available(self, docker_mock, res_mock, tempDir_mock):
        """If init-pixelated-docker-context.sh exists, initialize() runs it in
        the build temp dir and builds from that path instead of a Dockerfile."""
        # given
        provider = DockerProvider(self.root_path, self._adapter, 'leap_provider', 'some docker url')
        tempBuildDir = TempDir()
        try:
            tempDir_mock.return_value = tempBuildDir
            tempBuildDir_name = tempBuildDir.name
            with NamedTemporaryFile() as file:
                res_mock.resource_exists.return_value = True
                # Script records its own $PWD so we can assert the cwd used.
                res_mock.resource_string.return_value = '#!/bin/bash\necho %s $PWD > %s' % (file.name, file.name)
                # when
                provider.initialize()
                # then
                res_mock.resource_exists.assert_called_with('pixelated.resources', 'init-pixelated-docker-context.sh')
                res_mock.resource_string.assert_called_with('pixelated.resources', 'init-pixelated-docker-context.sh')
                with open(file.name, "r") as input:
                    data = input.read().replace('\n', '')
                    self.assertEqual('%s %s' % (file.name, os.path.realpath(tempBuildDir_name)), data)
                docker_mock.return_value.build.assert_called_once_with(path=tempBuildDir_name, tag='pixelated:latest', fileobj=None)
        finally:
            tempBuildDir.dissolve()

    @patch('pixelated.provider.docker.docker.Client')
    def test_that_authenticate_writes_password_to_fifo(self, docker_mock):
        """authenticate() pushes the credentials JSON through a FIFO that is
        removed once consumed."""
        provider = DockerProvider(self.root_path, self._adapter, 'leap_provider_hostname', 'some docker url')
        provider.initialize()
        provider.add('test', 'password')
        provider.authenticate('test', 'password')
        fifo_file = join(self.root_path, 'test', 'data', 'credentials-fifo')
        self.assertTrue(stat.S_ISFIFO(os.stat(fifo_file).st_mode))
        with open(fifo_file, 'r') as fifo:
            config = json.loads(fifo.read())
            self.assertEqual('leap_provider_hostname', config['leap_provider_hostname'])
            self.assertEqual('test', config['user'])
            self.assertEqual('password', config['password'])
        self._assert_file_gets_deleted(fifo_file)

    def _assert_file_gets_deleted(self, filename):
        """Poll up to 5s (CPU-clock based) for *filename* to disappear."""
        start = clock()
        timeout = 5
        while (clock() - start) < timeout and exists(filename):
            sleep(0.1)
        self.assertFalse(exists(filename))

    @patch('pixelated.provider.docker.docker.Client')
    def footest_that_authenticate_deletes_fifo_after_timeout(self, docker_mock):
        # Disabled test ("foo" prefix keeps the runner from picking it up).
        provider = DockerProvider(self.root_path, self._adapter, 'some docker url')
        provider.initialize()
        provider.add('test', 'password')
        fifo_file = join(self.root_path, 'test', 'data', 'credentials-fifo')
        provider.authenticate('test', 'password')
        sleep(3)
        self.assertFalse(stat.S_ISFIFO(os.stat(fifo_file).st_mode))

    def _create_initialized_provider(self, root_path, adapter, docker_url=DockerProvider.DEFAULT_DOCKER_URL):
        """Build a DockerProvider with the initializing flag force-cleared."""
        provider = DockerProvider(root_path, adapter, 'leap_provider_hostname', 'leap provider ca', docker_url)
        provider._initializing = False
        return provider