def setUp(self):
    DatabaseFixture.setUp(self)
    LogTestCase.setUp(self)
    self.client = Client(datadir=self.path,
                         transaction_system=False,
                         connect_to_known_hosts=False,
                         use_docker_machine_manager=False,
                         use_monitor=False)
def test_config_override_valid(self, *_):
    self.assertTrue(hasattr(ClientConfigDescriptor(), "node_address"))
    c = Client(datadir=self.path,
               node_address='1.0.0.0',
               transaction_system=False,
               connect_to_known_hosts=False,
               use_docker_machine_manager=False,
               use_monitor=False)
    self.assertEqual(c.config_desc.node_address, '1.0.0.0')
    c.quit()
def make_client(*_, **kwargs):
    default_kwargs = {
        'app_config': Mock(),
        'config_desc': ClientConfigDescriptor(),
        'keys_auth': Mock(
            _private_key='a' * 32,
            key_id='a' * 64,
            public_key=b'a' * 128,
        ),
        'database': Mock(),
        'transaction_system': Mock(),
        'connect_to_known_hosts': False,
        'use_docker_manager': False,
        'use_monitor': False,
    }
    default_kwargs.update(kwargs)
    client = Client(**default_kwargs)
    return client
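A hedged usage sketch, not taken from the original source: the factory above merges caller keyword arguments into its mocked defaults, so a test can override a single dependency while keeping the rest mocked. The specific overrides shown here are illustrative assumptions.

# Hedged sketch: overriding one default of make_client() while keeping the
# mocked app_config, keys_auth, database and transaction_system.
client = make_client(use_monitor=True, connect_to_known_hosts=False)
assert client.config_desc is not None  # config_desc comes from the defaults
client.quit()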
def create_client(datadir):
    # executed in a subprocess
    from golem.network.stun import pystun
    pystun.get_ip_info = override_ip_info

    from golem.client import Client
    app_config = AppConfig.load_config(datadir)
    config_desc = ClientConfigDescriptor()
    config_desc.init_from_app_config(app_config)
    config_desc.key_difficulty = 0
    config_desc.use_upnp = False

    from golem.core.keysauth import KeysAuth
    with mock.patch.dict('ethereum.keys.PBKDF2_CONSTANTS', {'c': 1}):
        keys_auth = KeysAuth(
            datadir=datadir,
            private_key_name=faker.Faker().pystr(),
            password='******',
            difficulty=config_desc.key_difficulty,
        )

    database = Database(
        db, fields=DB_FIELDS, models=DB_MODELS, db_dir=datadir)

    ets = _make_mock_ets()
    return Client(datadir=datadir,
                  app_config=app_config,
                  config_desc=config_desc,
                  keys_auth=keys_auth,
                  database=database,
                  transaction_system=ets,
                  use_monitor=False,
                  connect_to_known_hosts=False,
                  use_docker_manager=False)
def setUp(self):
    TestDirFixtureWithReactor.setUp(self)
    self.client = Client.__new__(Client)
    from threading import Lock
    self.client.lock = Lock()
    self.client.task_tester = None
    self.logic = GuiApplicationLogic()
    self.app = Gui(self.logic, AppMainWindow)
def setUp(self):
    DatabaseFixture.setUp(self)
    LogTestCase.setUp(self)
    self.client = Client.__new__(Client)
    from threading import Lock
    self.client.lock = Lock()
    self.client.task_tester = None
    self.logic = GuiApplicationLogic()
    self.app = Gui(self.logic, AppMainWindow)
def test_config_override_invalid(self, *_):
    """Test that Client() does not allow overriding properties
    that are not in ClientConfigDescriptor.
    """
    self.assertFalse(hasattr(ClientConfigDescriptor(), "node_colour"))
    with self.assertRaises(AttributeError):
        Client(datadir=self.path,
               node_colour='magenta',
               transaction_system=False,
               connect_to_known_hosts=False,
               use_docker_machine_manager=False,
               use_monitor=False)
def create_client(datadir):
    # executed in a subprocess
    import stun
    stun.get_ip_info = override_ip_info

    from golem.client import Client
    return Client(datadir=datadir,
                  use_monitor=False,
                  transaction_system=False,
                  connect_to_known_hosts=False,
                  use_docker_machine_manager=False,
                  estimated_lux_performance=1000.0,
                  estimated_blender_performance=1000.0)
def inner(*mocks):
    client = None
    try:
        client = Client(datadir=self.path,
                        transaction_system=False,
                        connect_to_known_hosts=False,
                        use_docker_machine_manager=False,
                        use_monitor=False)
        thread = Thread(
            target=lambda: start_client(start_ranking=False,
                                        client=client,
                                        reactor=self._get_reactor()))
        thread.daemon = True
        thread.start()

        message = queue.get(True, 10)
        assert unicode(message).find(expected_result) != -1
    except Exception as exc:
        self.fail(u"Cannot start client process: {}".format(exc))
    finally:
        if client:
            client.quit()
def _create_client(self, task_id, postfix):
    directory = os.path.join(self.tempdir, 'node' + postfix)
    dir_manager = DirManager(directory)

    cls = self._resource_manager_class
    resource_manager = cls.__new__(cls)
    resource_manager.__init__(dir_manager)

    database = Database(
        db, fields=DB_FIELDS, models=DB_MODELS, db_dir=directory)

    with mock.patch('golem.client.node_info_str'):
        client = Client(datadir=dir_manager.root_path,
                        app_config=mock.Mock(),
                        config_desc=ClientConfigDescriptor(),
                        keys_auth=mock.Mock(),
                        database=database,
                        transaction_system=mock.Mock(),
                        connect_to_known_hosts=False,
                        use_docker_manager=False,
                        use_monitor=False)

    client.resource_server = BaseResourceServer(resource_manager,
                                                dir_manager,
                                                mock.Mock(),
                                                client)

    with mock.patch(
            "golem.network.concent.handlers_library"
            ".HandlersLibrary"
            ".register_handler"):
        client.task_server = TaskServer(
            node=Node(prv_addr='127.0.0.1', hyperdrive_prv_port=3282),
            config_desc=mock.Mock(),
            client=client,
            use_docker_manager=False,
        )

    client.start = mock.Mock()
    client.start_network = mock.Mock()
    client.task_server.sync_network = mock.Mock()
    client.task_server.start_accepting = mock.Mock()
    client.task_server.task_computer = mock.Mock()

    get_peer = mock.Mock(return_value=mock.Mock(host='127.0.0.1',
                                                port='3282'))
    transport = mock.Mock(getPeer=get_peer)
    task_session = TaskSession(mock.Mock(server=client.task_server,
                                         transport=transport))
    task_session.task_id = task_id

    resource_dir = resource_manager.storage.get_dir(task_id)
    return client, resource_dir, task_session
def setUp(self):
    super(TestClientRPCMethods, self).setUp()

    client = Client(datadir=self.path,
                    transaction_system=False,
                    connect_to_known_hosts=False,
                    use_docker_machine_manager=False,
                    use_monitor=False)
    client.sync = Mock()
    client.p2pservice = Mock()
    client.p2pservice.peers = {}
    client.task_server = Mock()
    client.monitor = Mock()

    self.client = client
class Node(object):
    """ Simple Golem Node connecting console user interface with Client
    :type client golem.client.Client:
    """

    def __init__(self, datadir=None, transaction_system=False,
                 **config_overrides):
        self.default_environments = []
        self.client = Client(datadir=datadir,
                             transaction_system=transaction_system,
                             **config_overrides)

        self.rpc_router = None
        self.rpc_session = None

        import logging
        self.logger = logging.getLogger("app")

    def initialize(self):
        self.load_environments(self.default_environments)
        self.client.sync()
        self.client.start()

    def load_environments(self, environments):
        for env in environments:
            env.accept_tasks = True
            self.client.environments_manager.add_environment(env)

    def connect_with_peers(self, peers):
        for peer in peers:
            self.client.connect(peer)

    def add_tasks(self, tasks):
        for task_def in tasks:
            task_builder = self._get_task_builder(task_def)
            golem_task = Task.build_task(
                task_builder(self.client.get_node_name(), task_def,
                             self.client.datadir))
            self.client.enqueue_new_task(golem_task)

    def run(self, use_rpc=False):
        try:
            from twisted.internet import reactor
            if use_rpc:
                config = self.client.config_desc
                reactor.callWhenRunning(self._start_rpc_server,
                                        config.rpc_address,
                                        int(config.rpc_port))
            reactor.run()
        except Exception as ex:
            self.logger.error("Reactor error: {}".format(ex))
        finally:
            self.client.quit()
            sys.exit(0)

    def _start_rpc_server(self, host, port):
        from twisted.internet import reactor
        from golem.rpc.router import CrossbarRouter
        self.rpc_router = CrossbarRouter(host=host, port=port,
                                         datadir=self.client.datadir)
        reactor.addSystemEventTrigger("before", "shutdown",
                                      self.rpc_router.stop)
        self.rpc_router.start(reactor, self._router_ready, self._rpc_error)

    def _router_ready(self, *_):
        methods = object_method_map(self.client, CORE_METHOD_MAP)
        self.rpc_session = Session(self.rpc_router.address, methods=methods)
        self.client.configure_rpc(self.rpc_session)
        self.rpc_session.connect().addErrback(self._rpc_error)

    def _rpc_error(self, err):
        self.logger.error("RPC error: {}".format(err))

    def _get_task_builder(self, task_def):
        raise NotImplementedError
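A minimal, hedged sketch of how the Node wrapper above might be driven; the datadir path is a hypothetical example and is not taken from the source.

# Hedged sketch: constructing and running the console Node defined above.
node = Node(datadir="/tmp/golem-datadir", transaction_system=False)
node.initialize()       # loads default environments, syncs and starts the client
node.run(use_rpc=True)  # runs the Twisted reactor and starts the Crossbar RPC router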
def __init__(self,  # noqa pylint: disable=too-many-arguments
             datadir: str,
             app_config: AppConfig,
             config_desc: ClientConfigDescriptor,
             # SEE golem.core.variables.CONCENT_CHOICES
             concent_variant: dict,
             peers: Optional[List[SocketAddress]] = None,
             use_monitor: bool = None,
             use_talkback: bool = None,
             use_docker_manager: bool = True,
             geth_address: Optional[str] = None,
             password: Optional[str] = None) -> None:

    # DO NOT MAKE THIS IMPORT GLOBAL
    # otherwise, reactor will install global signal handlers on import
    # and will prevent the IOCP / kqueue reactors from being installed.
    from twisted.internet import reactor

    self._reactor = reactor
    self._app_config = app_config
    self._config_desc = config_desc
    self._datadir = datadir
    self._use_docker_manager = use_docker_manager
    self._docker_manager: Optional[DockerManager] = None

    self._use_monitor = config_desc.enable_monitor \
        if use_monitor is None else use_monitor
    self._use_talkback = config_desc.enable_talkback \
        if use_talkback is None else use_talkback

    self._keys_auth: Optional[KeysAuth] = None

    self._ets = TransactionSystem(
        Path(datadir) / 'transaction_system',
        EthereumConfig,
    )
    self._ets.backwards_compatibility_tx_storage(Path(datadir))

    self.concent_variant = concent_variant

    self.rpc_router: Optional[CrossbarRouter] = None
    self.rpc_session: Optional[Session] = None
    self._rpc_publisher: Optional[Publisher] = None

    self._peers: List[SocketAddress] = peers or []

    # Initialize database
    self._db = Database(db, fields=DB_FIELDS, models=DB_MODELS,
                        db_dir=datadir)

    self.client: Optional[Client] = None

    self.apps_manager = AppsManager()

    self._client_factory = lambda keys_auth: Client(
        datadir=datadir,
        app_config=app_config,
        config_desc=config_desc,
        keys_auth=keys_auth,
        database=self._db,
        transaction_system=self._ets,
        use_docker_manager=use_docker_manager,
        use_monitor=self._use_monitor,
        concent_variant=concent_variant,
        geth_address=geth_address,
        apps_manager=self.apps_manager,
        task_finished_cb=self._try_shutdown)

    if password is not None:
        if not self.set_password(password):
            raise Exception("Password incorrect")
def start_client(start_ranking, datadir=None,
                 transaction_system=False, client=None,
                 reactor=None, **config_overrides):

    config_logging("client", datadir=datadir)
    logger = logging.getLogger("golem.client")
    install_unhandled_error_logger()

    if not reactor:
        from twisted.internet import reactor

    from golem.core.processmonitor import ProcessMonitor
    from golem.docker.manager import DockerManager
    from golem.rpc.router import CrossbarRouter

    process_monitor = None

    if not client:
        client = Client(datadir=datadir,
                        transaction_system=transaction_system,
                        **config_overrides)

    docker_manager = DockerManager.install(client.config_desc)
    docker_manager.check_environment()
    environments = load_environments()

    client.sync()

    for env in environments:
        client.environments_manager.add_environment(env)
    client.environments_manager.load_config(client.datadir)

    config = client.config_desc
    methods = object_method_map(client, CORE_METHOD_MAP)

    router = CrossbarRouter(
        host=config.rpc_address,
        port=config.rpc_port,
        datadir=client.datadir
    )
    session = Session(router.address, methods=methods)

    def router_ready(*_):
        session.connect().addCallbacks(session_ready, start_error)

    def session_ready(*_):
        global process_monitor
        logger.info('Router session ready. Starting client...')

        try:
            client.configure_rpc(session)
            logger.debug('client.start()')
            client.start()
            logger.debug('after client.start()')
        except SystemExit:
            raise
        except Exception as exc:
            logger.exception("Client process error: {}".format(exc))

        logger.info('Starting GUI process...')
        gui_process = start_gui(router.address)
        process_monitor = ProcessMonitor(gui_process)
        process_monitor.add_callbacks(stop_reactor)

        logger.info('Starting process monitor...')
        process_monitor.start()

    reactor.addSystemEventTrigger("before", "shutdown", router.stop)
    router.start(reactor, router_ready, start_error)

    if start_ranking:
        client.ranking.run(reactor)

    try:
        reactor.run()
    except ReactorAlreadyRunning:
        logger.debug("Client process: reactor is already running")

    if process_monitor:
        process_monitor.exit()
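A hedged sketch of calling start_client directly, matching the signature above; the datadir path is a hypothetical example, and the call blocks inside reactor.run() until shutdown.

# Hedged sketch: launching a client process without the ranking service,
# as the test helper above does, but from a plain script.
start_client(start_ranking=False, datadir="/tmp/golem-datadir",
             transaction_system=False)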
class AddGetResources(TempDirFixture, LogTestCase):

    __test__ = False
    _resource_manager_class = None

    @staticmethod
    def _create_resources(resource_dir):
        resources_relative = [
            'resource_1',
            os.path.join('dir_1', 'resource_2'),
            os.path.join('dir_1', 'resource_3'),
            os.path.join('dir_2', 'subdir', 'resource_4')
        ]
        resources = [os.path.join(resource_dir, r)
                     for r in resources_relative]

        for resource in resources:
            d = os.path.dirname(resource)
            if not os.path.exists(d):
                os.makedirs(d)
            with open(resource, 'wb') as f:
                f.write(str(uuid.uuid4()) * 256)

        return resources_relative, resources

    def setUp(self):
        TempDirFixture.setUp(self)

        self.task_id = str(uuid.uuid4())

        self.datadir_1 = os.path.join(self.path, 'node_1')
        self.datadir_2 = os.path.join(self.path, 'node_2')

        self.dir_manager_1 = DirManager(self.datadir_1)
        self.dir_manager_2 = DirManager(self.datadir_2)

        self.resource_manager_1 = self._resource_manager_class(
            self.dir_manager_1)
        self.resource_manager_2 = self._resource_manager_class(
            self.dir_manager_2)

        self.client_1 = Client(datadir=self.datadir_1,
                               connect_to_known_hosts=False,
                               use_docker_machine_manager=False,
                               use_monitor=False)
        self.client_2 = Client(datadir=self.datadir_2,
                               connect_to_known_hosts=False,
                               use_docker_machine_manager=False,
                               use_monitor=False)
        self.client_1.start = self.client_2.start = Mock()

        self.resource_server_1 = BaseResourceServer(self.resource_manager_1,
                                                    self.dir_manager_1,
                                                    Mock(), self.client_1)
        self.resource_server_2 = BaseResourceServer(self.resource_manager_2,
                                                    self.dir_manager_2,
                                                    Mock(), self.client_2)

        self.resource_server_1.client.resource_server = self.resource_server_1
        self.resource_server_2.client.resource_server = self.resource_server_2

        task_server_1 = TaskServer.__new__(TaskServer, Mock(), Mock(), Mock(),
                                           self.client_1)
        task_server_2 = TaskServer.__new__(TaskServer, Mock(), Mock(), Mock(),
                                           self.client_2)

        task_server_1.client = self.client_1
        task_server_2.client = self.client_2
        task_server_1.keys_auth = self.client_1.keys_auth
        task_server_2.keys_auth = self.client_2.keys_auth
        task_server_1.sync_network = task_server_2.sync_network = Mock()
        task_server_1.start_accepting = task_server_2.start_accepting = Mock()
        task_server_1.task_computer = task_server_2.task_computer = Mock()

        self.client_1.task_server = task_server_1
        self.client_2.task_server = task_server_2

        self.task_session_1 = TaskSession(Mock())
        self.task_session_2 = TaskSession(Mock())
        self.task_session_1.task_server = task_server_1
        self.task_session_2.task_server = task_server_2
        self.task_session_1.task_id = self.task_session_2.task_id = \
            self.task_id

        self.resource_dir_1 = self.resource_manager_1.storage.get_dir(
            self.task_id)
        self.resource_dir_2 = self.resource_manager_2.storage.get_dir(
            self.task_id)

        client_options = self.resource_manager_1.build_client_options(
            task_server_1.get_key_id())

        self.resources_relative, self.resources = self._create_resources(
            self.resource_dir_1)
        self.resource_manager_1._add_task(self.resources, self.task_id,
                                          client_options=client_options)

    def tearDown(self):
        self.client_1.quit()
        self.client_2.quit()
        TempDirFixture.tearDown(self)

    def test(self):
        send_buf_1 = []
        send_buf_2 = []

        self.task_session_1.send = lambda x: send_buf_1.append(x)
        self.task_session_2.send = lambda x: send_buf_2.append(x)

        msg_get_resource = MessageGetResource(task_id=self.task_id)
        msg = MessageGetResource.deserialize_message(
            msg_get_resource.serialize())
        assert msg

        self.task_session_1._react_to_get_resource(msg)

        msg_resource_list = send_buf_1.pop()
        msg = MessageResourceList.deserialize_message(
            msg_resource_list.serialize())
        assert msg

        self.task_session_2._react_to_resource_list(msg)
        self.resource_server_2._download_resources(async=False)

        for r in self.resources_relative:
            location_1 = os.path.join(self.resource_dir_1, r)
            location_2 = os.path.join(self.resource_dir_2, r)

            assert os.path.exists(location_1)
            assert os.path.exists(location_2)

            sha_256_1 = file_sha_256(location_1)
            sha_256_2 = file_sha_256(location_2)
            assert sha_256_1 == sha_256_2, '{} != {}'.format(
                sha_256_1.encode('hex'), sha_256_2.encode('hex'))
class TestClient(TestWithDatabase):

    def tearDown(self):
        if hasattr(self, 'client'):
            self.client.quit()

    def test_get_payments(self, *_):
        self.client = Client(datadir=self.path,
                             transaction_system=True,
                             connect_to_known_hosts=False,
                             use_docker_machine_manager=False,
                             use_monitor=False)

        payments = [
            Payment(subtask=uuid.uuid4(),
                    status=PaymentStatus.awaiting,
                    payee=str(uuid.uuid4()),
                    value=2 * 10**18,
                    created=time.time(),
                    modified=time.time())
            for _ in xrange(2)
        ]

        db = Mock()
        db.get_newest_payment.return_value = payments

        self.client.transaction_system.payments_keeper.db = db
        received_payments = self.client.get_payments_list()

        self.assertEqual(len(received_payments), len(payments))

        for i in xrange(len(payments)):
            self.assertEqual(received_payments[i]['subtask'],
                             payments[i].subtask)
            self.assertEqual(received_payments[i]['status'],
                             payments[i].status.value)
            self.assertEqual(received_payments[i]['payee'],
                             unicode(payments[i].payee))
            self.assertEqual(received_payments[i]['value'],
                             unicode(payments[i].value))

    def test_payment_address(self, *_):
        self.client = Client(datadir=self.path,
                             transaction_system=True,
                             connect_to_known_hosts=False,
                             use_docker_machine_manager=False,
                             use_monitor=False)

        payment_address = self.client.get_payment_address()
        self.assertIsInstance(payment_address, unicode)
        self.assertTrue(len(payment_address) > 0)

    @patch('golem.transactions.ethereum.ethereumtransactionsystem.'
           'EthereumTransactionSystem.sync')
    def test_sync(self, *_):
        self.client = Client(datadir=self.path,
                             transaction_system=True,
                             connect_to_known_hosts=False,
                             use_docker_machine_manager=False,
                             use_monitor=False)
        self.client.sync()
        # TODO: assertTrue when re-enabled
        self.assertFalse(self.client.transaction_system.sync.called)

    def test_remove_resources(self, *_):
        self.client = Client(datadir=self.path,
                             transaction_system=False,
                             connect_to_known_hosts=False,
                             use_docker_machine_manager=False,
                             use_monitor=False)

        def unique_dir():
            d = os.path.join(self.path, str(uuid.uuid4()))
            if not os.path.exists(d):
                os.makedirs(d)
            return d

        c = self.client
        c.task_server = Mock()
        c.task_server.get_task_computer_root.return_value = unique_dir()
        c.task_server.task_manager.get_task_manager_root.return_value = \
            unique_dir()

        c.resource_server = Mock()
        c.resource_server.get_distributed_resource_root.return_value = \
            unique_dir()

        d = c.get_computed_files_dir()
        self.assertIn(self.path, d)
        self.additional_dir_content([3], d)
        c.remove_computed_files()
        self.assertEqual(os.listdir(d), [])

        d = c.get_distributed_files_dir()
        self.assertIn(self.path, os.path.normpath(d))  # normpath for mingw
        self.additional_dir_content([3], d)
        c.remove_distributed_files()
        self.assertEqual(os.listdir(d), [])

        d = c.get_received_files_dir()
        self.assertIn(self.path, d)
        self.additional_dir_content([3], d)
        c.remove_received_files()
        self.assertEqual(os.listdir(d), [])

    def test_datadir_lock(self, *_):
        # Let's use non existing dir as datadir here to check how the Client
        # is able to cope with that.
        datadir = os.path.join(self.path, "non-existing-dir")
        self.client = Client(datadir=datadir,
                             transaction_system=False,
                             connect_to_known_hosts=False,
                             use_docker_machine_manager=False,
                             use_monitor=False)
        self.assertEqual(self.client.config_desc.node_address, '')
        with self.assertRaises(IOError):
            Client(datadir=datadir)

    def test_metadata(self, *_):
        self.client = Client(datadir=self.path,
                             transaction_system=False,
                             connect_to_known_hosts=False,
                             use_docker_machine_manager=False,
                             use_monitor=False)
        meta = self.client.get_metadata()
        self.assertIsNotNone(meta)
        self.assertEqual(meta, dict())

    def test_description(self, *_):
        self.client = Client(datadir=self.path,
                             transaction_system=False,
                             connect_to_known_hosts=False,
                             use_docker_machine_manager=False,
                             use_monitor=False)
        self.assertEqual(self.client.get_description(), "")
        desc = u"ADVANCE DESCRIPTION\n\tSOME TEXT"
        self.client.change_description(desc)
        self.assertEqual(self.client.get_description(), desc)

    @unittest.skip('IPFS metadata is currently disabled')
    def test_interpret_metadata(self, *_):
        from golem.network.ipfs.daemon_manager import IPFSDaemonManager
        from golem.network.p2p.p2pservice import P2PService

        self.client = Client(datadir=self.path,
                             transaction_system=False,
                             connect_to_known_hosts=False,
                             use_docker_machine_manager=False)

        self.client.p2pservice = P2PService(MagicMock(),
                                            self.client.config_desc,
                                            self.client.keys_auth)
        self.client.ipfs_manager = IPFSDaemonManager()
        meta = self.client.get_metadata()
        assert meta and meta['ipfs']

        ip = '127.0.0.1'
        port = 40102

        node = MagicMock()
        node.prv_addr = ip
        node.prv_port = port

        self.client.interpret_metadata(meta, ip, port, node)

    def test_get_status(self, *_):
        self.client = Client(datadir=self.path,
                             transaction_system=False,
                             connect_to_known_hosts=False,
                             use_docker_machine_manager=False,
                             use_monitor=False)
        c = self.client
        c.task_server = MagicMock()
        c.task_server.task_computer.get_progresses.return_value = {}
        c.p2pservice = MagicMock()
        c.p2pservice.get_peers.return_value = ["ABC", "DEF"]
        c.transaction_system = MagicMock()

        status = c.get_status()
        self.assertIn("Waiting for tasks", status)
        self.assertIn("Active peers in network: 2", status)

        mock1 = MagicMock()
        mock1.get_progress.return_value = 0.25
        mock2 = MagicMock()
        mock2.get_progress.return_value = 0.33

        c.task_server.task_computer.get_progresses.return_value = {
            "id1": mock1, "id2": mock2
        }
        c.p2pservice.get_peers.return_value = []

        status = c.get_status()
        self.assertIn("Computing 2 subtask(s)", status)
        self.assertIn("id1 (25.0%)", status)
        self.assertIn("id2 (33.0%)", status)
        self.assertIn("Active peers in network: 0", status)

        c.config_desc.accept_tasks = 0
        status = c.get_status()
        self.assertIn("Computing 2 subtask(s)", status)

        c.task_server.task_computer.get_progresses.return_value = {}
        status = c.get_status()
        self.assertIn("Not accepting tasks", status)

    def test_quit(self, *_):
        self.client = Client(datadir=self.path)
        self.client.db = None
        self.client.quit()

    @patch('twisted.internet.reactor', create=True)
    def test_collect_gossip(self, *_):
        self.client = Client(datadir=self.path,
                             transaction_system=False,
                             connect_to_known_hosts=False,
                             use_docker_machine_manager=False,
                             use_monitor=False)
        self.client.start_network()
        self.client.collect_gossip()

    @patch('golem.client.log')
    def test_do_work(self, log, *_):
        self.client = Client(datadir=self.path,
                             transaction_system=False,
                             connect_to_known_hosts=False,
                             use_docker_machine_manager=False,
                             use_monitor=False)
        c = self.client
        c.sync = Mock()
        c.p2pservice = Mock()
        c.task_server = Mock()
        c.resource_server = Mock()
        c.ranking = Mock()
        c.check_payments = Mock()

        # Test if method exits if p2pservice is not present
        c.p2pservice = None
        c.config_desc.send_pings = False

        c._Client__do_work()

        assert not log.exception.called
        assert not c.check_payments.called

        # Test calls with p2pservice
        c.p2pservice = Mock()

        c._Client__do_work()

        assert not c.p2pservice.ping_peers.called
        assert not log.exception.called
        assert c.p2pservice.sync_network.called
        assert c.task_server.sync_network.called
        assert c.resource_server.sync_network.called
        assert c.ranking.sync_network.called
        assert c.check_payments.called

        # Enable pings
        c.config_desc.send_pings = True

        # Make methods throw exceptions
        def raise_exc():
            raise Exception('Test exception')

        c.p2pservice.sync_network = raise_exc
        c.task_server.sync_network = raise_exc
        c.resource_server.sync_network = raise_exc
        c.ranking.sync_network = raise_exc
        c.check_payments = raise_exc

        c._Client__do_work()

        assert c.p2pservice.ping_peers.called
        assert log.exception.call_count == 5

    @patch('golem.client.log')
    @patch('golem.client.dispatcher.send')
    def test_publish_events(self, send, log, *_):
        self.client = Client(datadir=self.path,
                             transaction_system=False,
                             connect_to_known_hosts=False,
                             use_docker_machine_manager=False,
                             use_monitor=False)
        c = self.client

        def get_balance(*_):
            d = Deferred()
            d.callback((1, 2, 3))
            return d

        c.task_server = Mock()
        c.task_server.task_computer = TaskComputer.__new__(TaskComputer)
        c.task_server.task_computer.stats = dict()

        c.get_balance = get_balance
        c.get_task_count = lambda *_: 0
        c.get_supported_task_count = lambda *_: 0
        c.connection_status = lambda *_: 'test'

        c.config_desc.node_snapshot_interval = 1
        c.config_desc.network_check_interval = 1

        c._publish = Mock()

        past_time = time.time() - 10**10
        future_time = time.time() + 10**10

        c.last_nss_time = future_time
        c.last_net_check_time = future_time
        c.last_balance_time = future_time
        c.last_tasks_time = future_time

        c._Client__publish_events()

        assert not send.called
        assert not log.debug.called
        assert not c._publish.called

        c.last_nss_time = past_time
        c.last_net_check_time = past_time
        c.last_balance_time = past_time
        c.last_tasks_time = past_time

        c._Client__publish_events()

        assert not log.debug.called
        assert send.call_count == 2
        assert c._publish.call_count == 3

        def raise_exc(*_):
            raise Exception('Test exception')

        c.get_balance = raise_exc
        c._publish = Mock()
        send.call_count = 0

        c.last_nss_time = past_time
        c.last_net_check_time = past_time
        c.last_balance_time = past_time
        c.last_tasks_time = past_time

        c._Client__publish_events()

        assert log.debug.called
        assert send.call_count == 2
        assert c._publish.call_count == 2

    def test_activate_hw_preset(self, *_):
        self.client = Client(datadir=self.path,
                             transaction_system=False,
                             connect_to_known_hosts=False,
                             use_docker_machine_manager=False,
                             use_monitor=False)

        config = self.client.config_desc
        config.hardware_preset_name = 'non-existing'
        config.num_cores = 0
        config.max_memory_size = 0
        config.max_resource_size = 0

        self.client.activate_hw_preset('custom')

        assert config.hardware_preset_name == 'custom'
        assert config.num_cores > 0
        assert config.max_memory_size > 0
        assert config.max_resource_size > 0

    def test_presets(self, *_):
        Client.save_task_preset("Preset1", "TaskType1", "data1")
        Client.save_task_preset("Preset2", "TaskType1", "data2")
        Client.save_task_preset("Preset1", "TaskType2", "data3")
        Client.save_task_preset("Preset3", "TaskType2", "data4")

        presets = Client.get_task_presets("TaskType1")
        assert len(presets) == 2
        assert presets["Preset1"] == "data1"
        assert presets["Preset2"] == "data2"

        presets = Client.get_task_presets("TaskType2")
        assert len(presets) == 2
        assert presets["Preset1"] == "data3"
        assert presets["Preset3"] == "data4"

        Client.delete_task_preset("TaskType2", "Preset1")

        presets = Client.get_task_presets("TaskType1")
        assert len(presets) == 2
        assert presets["Preset1"] == "data1"

        presets = Client.get_task_presets("TaskType2")
        assert len(presets) == 1
        assert presets.get("Preset1") is None