Example #1
  def get_heap_profile(self):
    """Returns raw heap profiling data"""
    self._first_profile_id = \
      self._sendw('Profiler.getProfileHeaders')['result']['headers'][0]['uid']

    logging.info('Profile ID: %i' % self._first_profile_id)

    self._heap_profile_chunks = []
    self._heap_profile_data_recorded = False

    def heap_snapshot_data_callback(response):
      if response['method'] == 'Profiler.addHeapSnapshotChunk':
        self._heap_profile_chunks.append(response['params']['chunk'])
      elif response['method'] == 'Profiler.finishHeapSnapshot':
        self._heap_profile_data_recorded = True

    self._communicator.add_domain_callback('Profiler', 'heap_snapshot_data', heap_snapshot_data_callback)
    self._communicator.send_cmd('Profiler.getProfile', {'type': 'HEAP', 'uid': self._first_profile_id})

    wait_until(lambda: self._heap_profile_data_recorded)
    self._communicator.remove_domain_callback('Profiler', 'heap_snapshot_data')

    return ''.join(self._heap_profile_chunks)
Example #2
  def run_js(self, js, is_expression=True):
    """
    Runs JS in the current browser page, and returns the value if the JS is an expression.
    Otherwise, returns None.
    """
    self._got_js_result = False
    self._js_result = None

    def response_handler(response):
      if not is_expression:
        self._got_js_result = True
      elif 'result' in response and 'result' in response['result']:
        if 'wasThrown' in response['result']['result']:
          logging.error('Received error after running JS: {0}\n{1}'.format(js, response['result']))

        self._js_result = response['result']['result'].get('value')
        self._got_js_result = True

    self._communicator.send_cmd('Runtime.evaluate',
                                {'expression': js,
                                 'objectGroup': 'group',
                                 'returnByValue': True },
                                response_handler)
    self._communicator.send_cmd('Runtime.releaseObjectGroup',
                                {'objectGroup': 'group' },
                                response_handler)

    wait_until(lambda: self._got_js_result)
    return self._js_result
Example #3
  def has_heap_profiler(self):
    def response_handler(response):
      self._has_heap_profiler = response['result']['result']

    self._communicator.send_cmd('Profiler.hasHeapProfiler', {}, response_handler)

    wait_until(lambda: self._has_heap_profiler is not None)
    return self._has_heap_profiler
Example #4
  def clear_profiles(self):
    """Deletes all profiles that have been recorded (e.g. heap profiles, cpu profiles...)"""
    self._cleared_heap_profiles = False

    def response_handler(response):
      self._cleared_heap_profiles = True

    self._communicator.send_cmd('Profiler.clearProfiles', {}, response_handler)
    wait_until(lambda: self._cleared_heap_profiles)
Example #5
  def can_clear_http_cache(self):
    """Returns true if browser supports clearing the cache via remote debugging protocol"""
    def response_handler(response):
      self._can_clear_cache = response['result']['result']

    self._communicator.send_cmd('Network.canClearBrowserCache', {}, response_handler)

    wait_until(lambda: self._can_clear_cache is not None)
    return self._can_clear_cache
Example #6
  def navigate_to(self, url):
    """Navigates the browser window to the given url"""
    self._navigated = False

    def response_handler(response):
      self._navigated = True

    self._communicator.send_cmd('Page.navigate', {'url': url}, response_handler)
    wait_until(lambda: self._navigated)
Example #7
  def disable_profiling(self):
    if not self._profiling_enabled:
      log.warning('Profiling already disabled')
      return

    def stop_callback(m):
      self._profiling_enabled = False

    self._communicator.send_cmd('Profiler.disable', {}, stop_callback)
    wait_until(lambda: not self._profiling_enabled)
Example #8
  def start_css_selector_profiling(self):
    if self._css_profiling_started:
      log.error('CSS Profiling already started')
      return

    def response_handler(response):
      self._css_profiling_started = True

    self._communicator.send_cmd('CSS.startSelectorProfiler', {}, response_handler)
    wait_until(lambda: self._css_profiling_started)
Example #9
    def create_lun(self, pool, name, size, **kwargs):
        pool = self.system.get_pool(name=pool)
        try:
            lun = pool.create_lun(lun_name=name,
                                  size_gb=size, **kwargs)
        except storops_ex.UnityLunNameInUseError:
            lun = self.system.get_lun(name=name)

        utils.wait_until(condition=Condition.is_lun_io_ready, lun=lun)
        return lun
Example #10
  def stop_timeline_monitoring(self):
    if not self._timeline_started:
      log.error('Timeline monitoring not started')
      return

    def stop_callback(m):
      self._timeline_started = False

    self._communicator.send_cmd('Timeline.stop', {}, stop_callback)
    self._communicator.remove_domain_callbacks('Timeline')
    wait_until(lambda: not self._timeline_started)
Example #11
  def clear_http_cache(self):
    self._cache_clear_complete = False

    def response_handler(response):
      if 'error' in response:
        logging.error('Error received: ' + pformat(response['error']))
      else:
        self._cache_clear_complete = True

    self._communicator.send_cmd('Network.clearBrowserCache', {}, response_handler)
    wait_until(lambda: self._cache_clear_complete)
Example #12
  def clear_cookies(self):
    self._cookie_clear_complete = False

    def response_handler(response):
      if 'error' in response:
        log.error('Error received: ' + pformat(response["error"]))
      else:
        self._cookie_clear_complete = True

    self._communicator.send_cmd('Network.clearBrowserCookies', {}, response_handler)
    wait_until(lambda: self._cookie_clear_complete)
Example #13
  def stop_page_event_monitoring(self):
    if not self._page_events_enabled:
      log.error('Page events not being monitored')
      return

    def stop_callback(m):
      self._page_events_enabled = False

    self._communicator.send_cmd('Page.disable', {}, stop_callback)
    wait_until(lambda: not self._page_events_enabled)
    self._communicator.remove_domain_callbacks('Page')
Example #14
  def stop_network_monitoring(self):
    if not self._network_enabled:
      log.error('Network monitoring not enabled')
      return

    def stop_callback(m):
      self._network_enabled = False

    self._communicator.send_cmd('Network.disable', {}, stop_callback)
    self._communicator.remove_domain_callbacks('Network')
    wait_until(lambda: not self._network_enabled)
Example #15
  def get_dom_node_count(self):
    """Returns an object containing groups of DOM nodes and their respective sizes"""
    self._dom_node_count = None

    def response_handler(response):
      self._dom_node_count = response['result']

    self._communicator.send_cmd('Memory.getDOMNodeCount', {}, response_handler)

    wait_until(lambda: self._dom_node_count)
    return self._dom_node_count
Example #16
  def enable_debugging(self):
    """Enables debugging, which lets you set JS breakpoints. Also required for doing heap profiles"""
    if self._debugging_enabled:
      log.error('Debugging already enabled')
      return

    def start_callback(m):
      self._debugging_enabled = True

    self._communicator.send_cmd('Debugger.enable', {}, start_callback)
    wait_until(lambda: self._debugging_enabled)
Example #17
  def stop_css_selector_profiling(self):
    if not self._css_profiling_started:
      log.error('CSS selector profiling not started')
      return

    def response_handler(response):
      self._css_profiling_started = False
      self._css_profile = response

    self._communicator.send_cmd('CSS.stopSelectorProfiler', {}, response_handler)
    wait_until(lambda: not self._css_profiling_started)
    return self._css_profile
Example #18
  def take_heap_snapshot(self):
    """Takes a heap snapshot, which can be retrieved using get_heap_profile()"""
    self._heap_snapshot_finished = False

    def progress_callback(response):
      if response['method'] == 'Profiler.reportHeapSnapshotProgress':
        self._heap_snapshot_finished = (response['params']['done'] == response['params']['total'])

    self._communicator.add_domain_callback('Profiler', 'heap_snapshot_progress', progress_callback)
    self._sendw('Profiler.takeHeapSnapshot')

    wait_until(lambda: self._heap_snapshot_finished)
    self._communicator.remove_domain_callback('Profiler', 'heap_snapshot_progress')
Example #19
  def enable_profiling(self):
    """
    Generally enables profiling. Must be called before calling enable_heap_profiling(). Also required before doing CSS
    profiling, but CSS profiling has not been implemented for this client yet.
    """
    if self._profiling_enabled:
      log.warning('Profiling already enabled')
      return

    def start_callback(m):
      self._profiling_enabled = True

    self._communicator.send_cmd('Profiler.enable', {}, start_callback)
    wait_until(lambda: self._profiling_enabled)
Example #20
  def _sendw(self, command, args=None):
    """
    Send a command with the given arguments, wait till a response is received,
    and return it.
    """
    if args is None:
      args = {}
    rec_id = uuid4().hex

    def response_handler(response):
      self._received[rec_id] = True
      self._responses[rec_id] = response

    self._communicator.send_cmd(command, args, response_handler)

    wait_until(lambda: self._received.get(rec_id))
    response = self._responses[rec_id]
    del self._received[rec_id]
    del self._responses[rec_id]
    return response
Example #21
 def test_new_user_categories(self):
     register_user(self.browser)
     elem = wait_until(self.browser, '//select[@name="category_id"]')
     select = Select(elem)
     for option in select.options:
         if option.text == '(category)':
             self.assertTrue(option.text not in DEFAULT_CATEGORIES)
         else:
             self.assertTrue(option.text.lower() in DEFAULT_CATEGORIES)
     delete_user(self.browser)
Example #22
    def create_lun(self, pool, name, size, provision=None,
                   tier=None, cg_name=None, ignore_thresholds=False):
        pool = self.system.get_pool(name=pool)
        try:
            lun = pool.create_lun(lun_name=name,
                                  size_gb=size,
                                  provision=provision,
                                  tier=tier,
                                  ignore_thresholds=ignore_thresholds)
        except storops_ex.VNXLunNameInUseError:
            lun = self.system.get_lun(name=name)

        utils.wait_until(condition=Condition.is_lun_io_ready, lun=lun)
        if cg_name:
            cg = self.system.get_cg(name=cg_name)
            cg.add_member(lun)
        if provision is vnx_enums.VNXProvisionEnum.COMPRESSED:
            lun.enable_compression()
        return lun
Example #23
  def start_network_monitoring(self, callback):
    """
    Enables processing of network events via the specified callback.

    Network events give information about:
    - Resource requests
    - Resource responses
    - Resource data transfer progress
    """

    if self._network_enabled:
      log.error('Network monitoring already enabled')
      return

    def start_callback(m):
      self._network_enabled = True

    self._communicator.add_domain_callback('Network', 'network_event', callback)
    self._communicator.send_cmd('Network.enable', {}, start_callback)
    wait_until(lambda: self._network_enabled)
Example #24
  def start_timeline_monitoring(self, callback):
    """
    Enables monitoring of timeline events, including:
      - Resource requests
      - Paint events
      - GC Events
      ...and more
    """

    if self._timeline_started:
      log.error('Timeline monitoring already started')
      return

    def start_callback(m):
      self._timeline_started = True

    self._communicator.add_domain_callback('Timeline', 'timeline_event', callback)
    self._communicator.send_cmd('Timeline.setIncludeMemoryDetails', {'enabled': True})
    self._communicator.send_cmd('Timeline.start', {}, start_callback)
    wait_until(lambda: self._timeline_started)
Example #25
def step_impl(context, reg):
    client = context.client

    def check_responses():
        if len(client.responses) > 0:
            x = re.search(reg, client.responses[-1:][0])
            if x:
                # found match
                return True

    if not utils.wait_until(
            check_responses, timeout=20, label="Waiting for node startup"):
        raise Exception("Did not receive response matching regex", reg)
Example #26
  def start_page_event_monitoring(self, callback):
    """
    Allows processing of page events via the given callback.

    Page events include:
    - page load
    - domcontent
    - frame navigated
    :param callback: function invoked with each page event notification
    """

    if self._page_events_enabled:
      log.error('Page events already being monitored')
      return

    def start_callback(m):
      self._page_events_enabled = True

    self._communicator.add_domain_callback('Page', 'page_event', callback)
    self._communicator.send_cmd('Page.enable', {}, start_callback)

    wait_until(lambda: self._page_events_enabled)
Example #27
    def test_timeout_jobs(self):
        with TestAreaContext("job_queue_test_kill") as work_area:
            job_numbers = set()

            def callback(arg):
                nonlocal job_numbers
                job_numbers.add(arg[0]["job_number"])

            job_queue = create_queue(
                never_ending_script,
                max_submit=1,
                max_runtime=5,
                callback_timeout=callback,
            )

            assert job_queue.queue_size == 10
            assert job_queue.is_active()

            pool_sema = BoundedSemaphore(value=10)
            start_all(job_queue, pool_sema)

            # make sure never ending jobs are running
            wait_until(lambda: self.assertTrue(job_queue.is_active()))

            wait_until(lambda: self.assertFalse(job_queue.is_active()))

            job_queue._transition()

            for q_index, job in enumerate(job_queue.job_list):
                assert job.status == JobStatusType.JOB_QUEUE_IS_KILLED
                iens = job_queue._qindex_to_iens[q_index]
                assert job_queue.snapshot()[iens] == str(
                    JobStatusType.JOB_QUEUE_IS_KILLED
                )

            assert job_numbers == set(range(10))

            for job in job_queue.job_list:
                job.wait_for()
Example #28
        def _test_import_endpoint():

            _destroy_elements()

            assert len(self.endpoints) == 0
            assert len(self.cidr_mappings) == 0
            assert len(self.background_tasks) == 0

            cidr_mapping = self.cidr_mappings.create(cidr_block='%s/30' %
                                                     self.lab_endpoint_ip,
                                                     deployment_group=1,
                                                     comments='no comments',
                                                     active=True)

            assert len(self.cidr_mappings) == 1

            selected_cidr = list(self.cidr_mappings)[0]

            assert selected_cidr.cidr_block == cidr_mapping.cidr_block
            assert selected_cidr.deployment_group == cidr_mapping.deployment_group
            assert selected_cidr.comments == cidr_mapping.comments
            assert selected_cidr.active == cidr_mapping.active

            cidr_mapping.generate_endpoints()
            background_tasks = list(self.background_tasks)
            assert len(background_tasks) == 1

            wait_until(_are_all_tasks_complete, total_timeout=300)

            assert len(self.cidr_mappings) > 0
            assert len(self.endpoints) > 0

            assert len(self.endpoints.filter(keywords='no.such.thing')) == 0
            assert len(
                self.endpoints.filter(keywords=self.lab_endpoint_ip)) > 0

            return list(
                self.endpoints.filter(keywords=self.lab_endpoint_ip))[0]
Example #29
    def test_workflow_thread_cancel_ert_script(self):
        with TestAreaContext(
            "python/job_queue/workflow_runner_ert_script"
        ) as work_area:
            WorkflowCommon.createWaitJob()

            joblist = WorkflowJoblist()
            self.assertTrue(joblist.addJobFromFile("WAIT", "wait_job"))
            self.assertTrue("WAIT" in joblist)

            workflow = Workflow("wait_workflow", joblist)

            self.assertEqual(len(workflow), 3)

            workflow_runner = WorkflowRunner(workflow)

            self.assertFalse(workflow_runner.isRunning())

            with workflow_runner:
                self.assertIsNone(workflow_runner.workflowResult())

                wait_until(lambda: self.assertTrue(workflow_runner.isRunning()))
                wait_until(lambda: self.assertFileExists("wait_started_0"))

                wait_until(lambda: self.assertFileExists("wait_finished_0"))

                wait_until(lambda: self.assertFileExists("wait_started_1"))

                workflow_runner.cancel()

                wait_until(lambda: self.assertFileExists("wait_cancelled_1"))

                self.assertTrue(workflow_runner.isCancelled())

            self.assertFileDoesNotExist("wait_finished_1")
            self.assertFileDoesNotExist("wait_started_2")
            self.assertFileDoesNotExist("wait_cancelled_2")
            self.assertFileDoesNotExist("wait_finished_2")
Example #30
    def start_page_event_monitoring(self, callback):
        """
    Allows processing of page events via the given callback.

    Page events include:
    - page load
    - domcontent
    - frame navigated
    :param type callback: <description of param>
    :return: eiwojfei
    """

        if self._page_events_enabled:
            log.error('Page events already being monitored')
            return

        def start_callback(m):
            self._page_events_enabled = True

        self._communicator.add_domain_callback('Page', 'page_event', callback)
        self._communicator.send_cmd('Page.enable', {}, start_callback)

        wait_until(lambda: self._page_events_enabled)
Example #31
    def _wait_for_socket(self, socket_id):
        """
        Block until the given socket connection is open
        """
        def check_socket():
            socket = self.sockets.get(socket_id, False)
            if socket:
                if socket.isConnected and socket.ws:
                    return True

        if not utils.wait_until(check_socket,
                                timeout=10,
                                label="Waiting for indexer to connect"):
            raise Exception("Failed to connect to indexer")
Example #32
def test_relay_and_read_status_error():
    intent_action = CallTestContract(TEST_CONTRACT).fail1()
    intent = Intent(intent_action=intent_action)

    signed_intent = wallet.sign(intent)

    response = signed_intent.relay()

    assert response.status_code == 201 or response.status_code == 200
    assert wait_until(lambda: signed_intent.status()["code"] == "completed",
                      640)
    time.sleep(5)

    assert not signed_intent.status()["receipt"]["success"]
    success_intents.append(signed_intent)
    assert signed_intent.status(
    )["receipt"]["result"]["error"] == "This is the error 1"
Example #33
 def test_deleting_expense(self):
     self.browser.get(SLATE_URL)
     # We want to verify that this expense is deleted.
     add_expense(self.browser, '7.50', 'Food (out)', 'Burrito')
     self.browser.get('%s/account/settings' % SLATE_URL)
     click(self.browser, '//tr//td[text()="Food (out)"]/..//a[text()="Edit"]')
     click(self.browser, '//input[@value="Delete"]', '//h3[text()="Edit category"]')
     self.browser.switch_to.alert.accept()
     self.browser.get(SLATE_URL)
     select = Select(
         wait_until(self.browser, '//select[@name="category_id"]')
     )
     for option in select.options:
         self.assertTrue(option.text != 'Food (out)')
     self.browser.get('%s/expenses' % SLATE_URL)
     self.assertFalse(
         exists_by_xpath(self.browser, '//td[text()="Food (out)"]')
     )
Example #34
def test_relay_and_read_status_success():
    wallet_receiver = Wallet(from_bytes(os.urandom(32)))

    intent_action = ERC20(TEST_ERC20).transfer(wallet_receiver.address, 0)
    intent = Intent(intent_action=intent_action)

    signed_intent = wallet.sign(intent)

    response = signed_intent.relay()

    assert response.status_code == 201 or response.status_code == 200
    assert wait_until(lambda: signed_intent.status()["code"] == "completed",
                      640)
    time.sleep(5)

    assert signed_intent.status()["receipt"]["success"]
    success_intents.append(signed_intent)
    assert signed_intent.status()["receipt"]["result"]["output"][0]
Example #35
def test_read_receipt_abi():
    intent_action = Contract(abi_test_contract)(TEST_CONTRACT).call2()
    intent = Intent(intent_action=intent_action,
                    salt=from_bytes(os.urandom(32)))

    signed_intent = wallet.sign(intent)

    response = signed_intent.relay()

    assert response.status_code == 201 or response.status_code == 200
    assert wait_until(lambda: signed_intent.status()["code"] == "completed",
                      640)
    time.sleep(5)

    assert signed_intent.status()["receipt"]["success"]
    success_intents.append(signed_intent)
    assert signed_intent.status(
    )["receipt"]["result"]["output"][0] == "This is the return of the call2"
Example #36
def test_read_receipt_multiple():
    intent_action = CallTestContract(TEST_CONTRACT).call3()
    intent = Intent(intent_action=intent_action,
                    salt=from_bytes(os.urandom(32)))

    signed_intent = wallet.sign(intent)

    response = signed_intent.relay()

    assert response.status_code == 201 or response.status_code == 200
    assert wait_until(lambda: signed_intent.status()["code"] == "completed",
                      640)
    time.sleep(5)

    assert signed_intent.status()["receipt"]["success"]
    success_intents.append(signed_intent)
    assert signed_intent.status(
    )["receipt"]["result"]["output"][0] == wallet.address
    assert signed_intent.status()["receipt"]["result"]["output"][1] == int(
        wallet.address, 16) * 9
Example #37
async def step_impl(context, amount):
    indexer = context.client
    api_height = context.last_height
    # get height from indexer
    await indexer.get_transactions("n37eRzDmZhDcLxRixwEvmemeWBHUUoRSRM")

    # wait for txid response
    def check_responses():
        if len(indexer.responses) > 0:
            res = json.loads(indexer.responses[-1:][0])
            print("Indexer height = {}".format(res["height"]))
            if res['height'] > api_height - amount and res[
                    'height'] < api_height + amount:
                # in sync
                return True

    if not utils.wait_until(check_responses,
                            timeout=10,
                            label="Waiting for latest tx from indexer"):
        raise Exception(
            "Did not receive correct indexer height for 10 seconds")
Example #38
    parser.add_argument(
        "--duration",
        type=is_valid_duration,
        default=60,
        help=
        "How long to take to transition from the start to end volume, in seconds. Default is 60."
    )
    parser.add_argument("--shuffle",
                        action="store_true",
                        help="Whether to shuffle the playlist.")
    args = parser.parse_args()

    if args.start_volume > args.end_volume:
        raise argparse.ArgumentTypeError(
            "The start volume must be less than or equal to the end volume.")

    print("Alarm will ring at " + args.time + ".")

    wait_until(args.time)

    print("Alarm ringing!")

    music_player = MusicPlayer()
    for dir in args.song_dirs:
        music_player.enqueue_dir(dir)

    music_player.play(start_volume=args.start_volume,
                      end_volume=args.end_volume,
                      duration=args.duration,
                      shuffle=args.shuffle)
Example #39
    def test_custom_resource_definition_deploy(self,
                                               custom_resource_definition,
                                               service_type, kind_logger):
        with kind_logger():
            name, fiaas_application, expected = custom_resource_definition

            # check that k8s objects for name doesn't already exist
            kinds = self._select_kinds(expected)
            for kind in kinds:
                with pytest.raises(NotFound):
                    kind.get(name)

            # First deploy
            fiaas_application.save()

            # Check that deployment status is RUNNING
            def _assert_status():
                status = FiaasApplicationStatus.get(
                    create_name(name, DEPLOYMENT_ID1))
                assert status.result == u"RUNNING"
                assert len(status.logs) > 0
                assert any(
                    "Saving result RUNNING for default/{}".format(name) in l
                    for l in status.logs)

            wait_until(_assert_status, patience=PATIENCE)

            # Check that annotations and labels are applied to status object
            status_labels = fiaas_application.spec.additional_labels.status
            if status_labels:
                status = FiaasApplicationStatus.get(
                    create_name(name, DEPLOYMENT_ID1))
                label_difference = status_labels.viewitems(
                ) - status.metadata.labels.viewitems()
                assert label_difference == set()

            # Check deploy success
            wait_until(_deploy_success(name, kinds, service_type, IMAGE1,
                                       expected, DEPLOYMENT_ID1),
                       patience=PATIENCE)

            # Redeploy, new image, possibly new init-container
            fiaas_application.spec.image = IMAGE2
            fiaas_application.metadata.labels[
                "fiaas/deployment_id"] = DEPLOYMENT_ID2
            strongbox_groups = []
            if "strongbox" in name:
                strongbox_groups = ["foo", "bar"]
                fiaas_application.spec.config["extensions"]["strongbox"][
                    "groups"] = strongbox_groups
            fiaas_application.save()

            # Check success
            wait_until(_deploy_success(name, kinds, service_type, IMAGE2,
                                       expected, DEPLOYMENT_ID2,
                                       strongbox_groups),
                       patience=PATIENCE)

            # Cleanup
            FiaasApplication.delete(name)

            def cleanup_complete():
                for kind in kinds:
                    with pytest.raises(NotFound):
                        kind.get(name)

            wait_until(cleanup_complete, patience=PATIENCE)
Example #40
    def fdd(self, request, kubernetes, service_type, k8s_version,
            use_docker_for_e2e):
        port = get_unbound_port()
        cert_path = os.path.dirname(kubernetes["api-cert"])
        docker_args = use_docker_for_e2e(
            request, cert_path, service_type, k8s_version, port,
            kubernetes['container-to-container-server-ip'])
        server = kubernetes[
            'container-to-container-server'] if docker_args else kubernetes[
                "host-to-container-server"]
        args = [
            "fiaas-deploy-daemon",
            "--port",
            str(port),
            "--api-server",
            server,
            "--api-cert",
            kubernetes["api-cert"],
            "--client-cert",
            kubernetes["client-cert"],
            "--client-key",
            kubernetes["client-key"],
            "--service-type",
            service_type,
            "--ingress-suffix",
            "svc.test.example.com",
            "--environment",
            "test",
            "--datadog-container-image",
            "DATADOG_IMAGE:tag",
            "--strongbox-init-container-image",
            "STRONGBOX_IMAGE",
            "--use-ingress-tls",
            "default_off",
        ]
        if crd_supported(k8s_version):
            args.append("--enable-crd-support")
        args = docker_args + args
        fdd = subprocess.Popen(args,
                               stdout=sys.stderr,
                               env=merge_dicts(os.environ,
                                               {"NAMESPACE": "default"}))
        time.sleep(1)
        if fdd.poll() is not None:
            pytest.fail(
                "fiaas-deploy-daemon has crashed after startup, inspect logs")

        def ready():
            resp = requests.get("http://localhost:{}/healthz".format(port),
                                timeout=TIMEOUT)
            resp.raise_for_status()

        try:
            wait_until(ready,
                       "web-interface healthy",
                       RuntimeError,
                       patience=PATIENCE)
            if crd_supported(k8s_version):
                wait_until(crd_available(kubernetes, timeout=TIMEOUT),
                           "CRD available",
                           RuntimeError,
                           patience=PATIENCE)
            yield "http://localhost:{}/fiaas".format(port)
        finally:
            self._end_popen(fdd)
Example #41
    def test_simulation_context(self):
        config_file = self.createTestPath("local/batch_sim/sleepy_time.ert")
        with ErtTestContext("res/sim/simulation_context",
                            config_file) as test_context:
            ert = test_context.getErt()

            size = 4
            even_mask = BoolVector(initial_size=size)
            odd_mask = BoolVector(initial_size=size)

            for iens_2 in range(size // 2):
                even_mask[2 * iens_2] = True
                even_mask[2 * iens_2 + 1] = False

                odd_mask[2 * iens_2] = False
                odd_mask[2 * iens_2 + 1] = True

            fs_manager = ert.getEnkfFsManager()
            even_half = fs_manager.getFileSystem("even_half")
            odd_half = fs_manager.getFileSystem("odd_half")

            # i represents geo_id
            case_data = [(i, {}) for i in range(size)]
            even_ctx = SimulationContext(ert, even_half, even_mask, 0,
                                         case_data)
            odd_ctx = SimulationContext(ert, odd_half, odd_mask, 0, case_data)

            for iens in range(size):
                # do we have the proper geo_id in run_args?
                if iens % 2 == 0:
                    self.assertFalse(even_ctx.isRealizationFinished(iens))
                    self.assertEqual(even_ctx.get_run_args(iens).geo_id, iens)
                else:
                    self.assertFalse(odd_ctx.isRealizationFinished(iens))
                    self.assertEqual(odd_ctx.get_run_args(iens).geo_id, iens)

            def any_is_running():
                return even_ctx.isRunning() or odd_ctx.isRunning()

            wait_until(func=(lambda: self.assertFalse(any_is_running())),
                       timeout=90)

            self.assertEqual(even_ctx.getNumFailed(), 0)
            self.assertEqual(even_ctx.getNumRunning(), 0)
            self.assertEqual(even_ctx.getNumSuccess(), size / 2)

            self.assertEqual(odd_ctx.getNumFailed(), 0)
            self.assertEqual(odd_ctx.getNumRunning(), 0)
            self.assertEqual(odd_ctx.getNumSuccess(), size / 2)

            even_state_map = even_half.getStateMap()
            odd_state_map = odd_half.getStateMap()

            for iens in range(size):
                if iens % 2 == 0:
                    self.assertTrue(even_ctx.didRealizationSucceed(iens))
                    self.assertFalse(even_ctx.didRealizationFail(iens))
                    self.assertTrue(even_ctx.isRealizationFinished(iens))

                    self.assertEqual(even_state_map[iens],
                                     RealizationStateEnum.STATE_HAS_DATA)
                else:
                    self.assertTrue(odd_ctx.didRealizationSucceed(iens))
                    self.assertFalse(odd_ctx.didRealizationFail(iens))
                    self.assertTrue(odd_ctx.isRealizationFinished(iens))

                    self.assertEqual(odd_state_map[iens],
                                     RealizationStateEnum.STATE_HAS_DATA)
Example #42
 def wait_for_startup(self):
     if not utils.wait_until(
             self.is_running, timeout=60, label="Waiting for node startup"):
         raise Exception("Failed to wait for node startup. {}".format(self))
Example #43
 def test_no_user_categories(self):
     self.browser.get(SLATE_URL)
     elem = wait_until(self.browser, '//select[@name="category_id"]')
     select = Select(elem)
     self.assertTrue(len(select.options) == 1)
Example #44
 def confirm_transactions(self, coin_node, lit_node, no_transactions):
     wait_until(lambda: coin_node.getmempoolinfo().json()['result']['size'] == no_transactions)
     coin_node.generate(1)
     self.chain_height += 1
     wait_until(lambda: lit_node.Balance()['result']["Balances"][0]["SyncHeight"] == self.chain_height)
Example #45
    def engage_with_active_users_from_target(self, target_account,
                                             temp_like_limit):
        """
			go to target_account's page 
			open recent post
			Click "likes" to view who has liked recent post
			Scroll down likes window and collect active users
			Visit each active_user and like last 3 posts based on filters 
		"""
        self.stop_count = temp_like_limit

        # Land on target_account page
        self.driver.get('https://instagram.com/' + target_account)
        utils.rand_wait_sec()

        # Open target_account most recent post # input('Open recent post: ')
        self.target_open_recent_post()

        # View most recent likes of recent post
        likes_window_visible = False
        while not likes_window_visible:
            #m82CD class name of "Likes" header of the "Likes" pop up window
            try:
                # Click the number of likes
                #self.driver.find_element_by_partial_link_text('likes').click()
                self.driver.find_element_by_class_name("zV_Nj").click()
                #rand_wait_sec()
                utils.rand_wait_sec()

                # Wait and see if "Likes" header is visible
                utils.wait_until(
                    self.driver,
                    ec.presence_of_element_located((By.CLASS_NAME, 'm82CD')))
                likes_window_visible = True

            except (NoSuchElementException, TimeoutException):
                print(
                    f"Re-opening \"Likes\" pop up window because Instagram auto closed it. "
                )

        # Note, instagram doesn't display all accounts at the same time
        # so you have to scroll down, scrape the page, scroll down again scrape the page
        active_users_list_of_sets = []
        scroll_count = 4
        print(
            f"Manually scroll down \"Likes\" pop up window {scroll_count} times"
        )
        for i in range(scroll_count
                       ):  # Increase range to increase number of Active Users
            input(f"manual scroll down {i+1} (press Enter to continue): ")

            # Method 1: (doesn't grab scroll bar)
            #input(f"method 1: auto scroll down {i+1}: ")
            #self.driver.find_element_by_css_selector('body').send_keys(Keys.PAGE_DOWN)
            #actions.drag_and_drop_by_offset(element, 50, 50)
            #actions.perform()

            # Method 2:
            #input(f"method 2: auto scroll down {i+1}: ")
            # likes_window_elems = self.driver.find_elements_by_class_name('_1XyCr')
            # likes_window_elems[0].send_keys(Keys.PAGE_DOWN)

            # Scrape all hyperlinks
            href_elems = self.driver.find_elements_by_xpath("//a[@href]")
            hrefs = []
            for elem in href_elems:
                if elem.get_attribute("href").startswith(
                        "https://www.instagram.com/"):
                    hrefs.append(elem.get_attribute("href"))
            # Return only instagram account usernames from list of all hyperlinks
            active_users_list_of_sets.append(
                utils.get_active_users_in_href_elem(hrefs, target_account))

        active_users = utils.active_users_to_set(active_users_list_of_sets,
                                                 target_account)

        print(f"Number of Active Users: {len(active_users)}")
        print(f"Active Users: ")
        for account in active_users:
            print(account)

        #input(f"Press Enter to close: ")
        # Go to active user pages and like recent posts
        self.iterate_through_active_users(active_users)
Example #46
class RemoteWebKitClient(object):
    def __init__(self, communicator):
        self._communicator = communicator
        self._timeline_started = False
        self._network_enabled = False
        self._can_clear_cache = None
        self._has_heap_profiler = None
        self._heap_profiling_started = False
        self._profiling_enabled = False
        self._debugging_enabled = False
        self._page_events_enabled = False
        self._css_profiling_started = False
        self._css_profile = None

    def stop(self):
        """
        Releases websocket being used for debugging
        """
        self._communicator.stop()

    # --------------------------------------------------------------------------
    # RUNTIME
    # --------------------------------------------------------------------------

    def run_js(self, js, is_expression=True):
        """
        Runs JS in the current browser page, and returns the value if the JS is an expression.
        Otherwise, returns None.
        """
        self._got_js_result = False
        self._js_result = None

        def response_handler(response):
            if not is_expression:
                self._got_js_result = True
            elif 'result' in response and 'result' in response['result']:
                if 'wasThrown' in response['result']['result']:
                    log.error(
                        'Received error after running JS: {0}\n{1}'.format(
                            js, response['result']))

                self._js_result = response['result']['result'].get('value')
                self._got_js_result = True

        self._communicator.send_cmd('Runtime.evaluate', {
            'expression': js,
            'objectGroup': 'group',
            'returnByValue': True
        }, response_handler)
        self._communicator.send_cmd('Runtime.releaseObjectGroup',
                                    {'objectGroup': 'group'}, response_handler)

        wait_until(lambda: self._got_js_result)
        return self._js_result

    def get_window_performance(self):
        """
        Returns an object containing an assortment of performance timings
        See: https://dvcs.w3.org/hg/webperf/raw-file/tip/specs/NavigationTiming/Overview.html#sec-navigation-timing-interface
        """
        return self.run_js('window.performance')['timing']

    # --------------------------------------------------------------------------
    # MEMORY
    # --------------------------------------------------------------------------

    def get_dom_node_count(self):
        """Returns an object containing groups of DOM nodes and their respective sizes"""
        self._dom_node_count = None

        def response_handler(response):
            self._dom_node_count = response['result']

        self._communicator.send_cmd('Memory.getDOMNodeCount', {},
                                    response_handler)

        wait_until(lambda: self._dom_node_count)
        return self._dom_node_count

    def get_proc_memory_info(self):
        """
        Returns information about all the memory being used by the process (i.e. memory for DOM,
        JS, etc.)

        NOTE: Memory.getProcessMemoryDistribution was added to WebKit in May 2012. If the
        connected browser is a release from before then, this will NOT work. Tested to work
        in Chrome Canary as of 7-12-2012 (Chrome v.22).
        """

        log.error(
            'get_proc_memory_info: This function only works on the very latest browsers. Do not use.'
        )

        self._memory_info = None

        def response_handler(response):
            try:
                self._memory_info = response['result']
            except KeyError:
                log.error(
                    'Browser is too old to feature Memory.getProcessMemoryDistribution'
                )

        self._communicator.send_cmd('Memory.getProcessMemoryDistribution', {},
                                    response_handler)

        wait_until(lambda: self._memory_info)
        return self._memory_info
Example #47
    def iterate_through_active_users(self, active_account_list):
        """
			iterate through list of active users			 
			filters: 
				active user has more than follower_lower_bound limit, 
				post has more than post_lower_bound limit (set in like_post function),
			call like_post() function
			track number of posts liked
		"""

        follower_lower_bound = 50  # Change back to 200
        for account_url in active_account_list:
            username = account_url[len("https://www.instagram.com/"):].rstrip(
                '/')

            self.driver.get(account_url)
            utils.rand_wait_sec(30, 45)

            try:
                # Wait until username is loaded on page
                name_loaded = False
                x_path_string = f"//*[contains(text(), '{username}')]"
                while not name_loaded:
                    utils.wait_until(
                        self.driver,
                        ec.presence_of_element_located(
                            (By.XPATH, x_path_string)))

                    webpage_text = self.driver.find_element_by_xpath(
                        x_path_string).text

                    if len(webpage_text) > 0:
                        name_loaded = True
            except:
                print(f"Error on {username}: ")
                print(f"Couldn't find username. \n" +
                      f"Most likely user has no text in profile. \n" +
                      f"Press Enter to try to continue: ")

            # Check if account is private
            is_private = self.driver.find_elements_by_xpath(
                "//*[contains(text(), 'This Account is Private')]")
            no_pics_posted = self.driver.find_elements_by_xpath(
                "//*[contains(text(), 'No Posts Yet')]")

            if (len(is_private) == 0) and (len(no_pics_posted) == 0):
                print(f"{username} is public")
                post_and_descriptions = {
                }  # dictionary: key=post urls and value= instagram's image description

                web_elems = self.driver.find_elements_by_class_name('FFVAD')
                descriptions = []
                for web_elem in web_elems:
                    try:
                        text_alt = web_elem.get_attribute("alt")
                        descriptions.append(text_alt)
                    except:
                        print("couldn't get text")
                descriptions = descriptions[:3]

                main_elements = self.driver.find_element_by_tag_name(
                    'main')  # Get Links
                hyperlink_elems = main_elements.find_elements_by_tag_name('a')

                media1 = None
                if media1 is None:  # All media types
                    media1 = ['', 'Post', 'Video']
                elif media1 == 'Photo':  # Posts w/ multiple pics
                    media1 = ['', 'Post']
                else:  # Make it a list
                    media1 = [media1]

                try:
                    if hyperlink_elems:
                        active_user_posts_all = [
                            link_elem.get_attribute('href')
                            for link_elem in hyperlink_elems
                            if link_elem and link_elem.text in media1
                        ]
                        active_user_recent_posts = []
                        active_user_recent_posts = active_user_posts_all[:3]

                        # Create dictionary of post urls with instagram description tags
                        post_and_descriptions[
                            active_user_recent_posts[0]] = descriptions[0]
                        post_and_descriptions[
                            active_user_recent_posts[1]] = descriptions[1]
                        post_and_descriptions[
                            active_user_recent_posts[2]] = descriptions[2]

                        follower_count = self.number_of_followers(self.driver)
                        # Like 3 most recent posts
                        if follower_count > follower_lower_bound:
                            # Only start opening photos if account has more than 200 followers

                            for key in post_and_descriptions:
                                post_url = key
                                post_description = post_and_descriptions[key]
                                print(f"Post link: {post_url}")
                                print(f"Post Description: {post_description}")

                                post_passed_filter = utils.post_description_filter(
                                    post_description)
                                successful_like = self.like_post(post_url)

                                if successful_like:
                                    self.like_count += 1
                                    post_tracker.liked_count += 1
                                    print(f"Liked {self.like_count} photos")

                                if self.like_count >= self.stop_count:
                                    return

                except:
                    #error = sys.exc_info()[0] # <class 'IndexError'>
                    #print(f"{error}")
                    print(f"didn't have at least 3 posts")

            else:
                print(f"{username} is private")

            print()
Example #48
    def test_terminate_jobs(self):

        # Executes it self recursively and sleeps for 100 seconds
        with open("dummy_executable", "w") as f:
            f.write(
                """#!/usr/bin/env python
import sys, os, time
counter = eval(sys.argv[1])
if counter > 0:
    os.fork()
    os.execv(sys.argv[0],[sys.argv[0], str(counter - 1) ])
else:
    time.sleep(100)"""
            )

        executable = os.path.realpath("dummy_executable")
        os.chmod("dummy_executable", stat.S_IRWXU | stat.S_IRWXO | stat.S_IRWXG)

        self.job_list = {
            "umask": "0002",
            "DATA_ROOT": "",
            "global_environment": {},
            "global_update_path": {},
            "jobList": [
                {
                    "name": "dummy_executable",
                    "executable": executable,
                    "target_file": None,
                    "error_file": None,
                    "start_file": None,
                    "stdout": "dummy.stdout",
                    "stderr": "dummy.stderr",
                    "stdin": None,
                    "argList": ["3"],
                    "environment": None,
                    "exec_env": None,
                    "license_path": None,
                    "max_running_minutes": None,
                    "max_running": None,
                    "min_arg": 1,
                    "arg_types": [],
                    "max_arg": None,
                }
            ],
            "run_id": "",
            "ert_pid": "",
        }

        with open("jobs.json", "w") as f:
            f.write(json.dumps(self.job_list))

        # macOS doesn't provide /usr/bin/setsid, so we roll our own
        with open("setsid", "w") as f:
            f.write(
                dedent(
                    """\
                #!/usr/bin/env python
                import os
                import sys
                os.setsid()
                os.execvp(sys.argv[1], sys.argv[1:])
                """
                )
            )
        os.chmod("setsid", 0o755)

        job_dispatch_script = importlib.util.find_spec("job_runner.job_dispatch").origin
        job_dispatch_process = Popen(
            [
                os.getcwd() + "/setsid",
                sys.executable,
                job_dispatch_script,
                os.getcwd(),
            ]
        )

        p = psutil.Process(job_dispatch_process.pid)

        # Three levels of processes should spawn 8 children in total
        wait_until(lambda: self.assertEqual(len(p.children(recursive=True)), 8))

        p.terminate()

        wait_until(lambda: self.assertEqual(len(p.children(recursive=True)), 0))

        os.wait()  # allow os to clean up zombie processes
Example #49
 def test_adding_expense(self):
     add_expense(self.browser, '7.50', 'Food (out)', 'Burrito')
     td = wait_until(self.browser, '//table//tbody//td[1]')
     self.assertEqual(float(td.text), 7.50)
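
Common to nearly all of the examples above is the same pattern: register a callback that flips a flag (or stores a result), send an asynchronous command, then block on wait_until until the flag becomes truthy. The helper itself is not shown on this page, and its signature varies between repositories (some variants take a timeout and a label, others take a Selenium driver and an XPath, others an assertion callable that raises instead of returning False). A minimal sketch of the plain callable-polling variant; the timeout and poll_interval parameters are assumptions, not any particular project's API:

import time

def wait_until(condition, timeout=30, poll_interval=0.1):
    """Poll `condition` until it returns a truthy value or `timeout` seconds pass.

    Hypothetical sketch only; the real helpers used by the snippets above differ
    (extra arguments such as `label`, raising on timeout instead of returning False, etc.).
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = condition()
        if result:
            return result
        time.sleep(poll_interval)
    return False

With a helper along these lines, a call such as wait_until(lambda: self._navigated) from Example #6 simply blocks until the Page.navigate response callback has fired, and the falsy return value on timeout lets callers like Example #42 raise their own error.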