Example 1
    def test_reschedule_campaign_with_valid_data(
            self, token_first, campaign_in_db, talent_pool, candidate_first,
            smartlist_first, schedule_a_campaign, candidate_device_first):
        """
        Reschedule a campaign with valid data and it should return a 200 response.
        """
        sleep(15)
        data = generate_campaign_schedule_data(frequency_id=Frequency.DAILY)
        response = send_request(
            'put', PushCampaignApiUrl.SCHEDULE % campaign_in_db['id'],
            token_first, data)
        assert response.status_code == codes.OK
        response = response.json()
        assert 'task_id' in response
        assert 'message' in response
        task_id = response['task_id']
        assert task_id
        # campaign = get_campaign(campaign_in_db['id'], token_first)['campaign']
        # match_schedule_data(data, campaign)
        retry(get_blasts,
              attempts=30,
              sleepscale=1,
              sleeptime=3,
              retry_exceptions=(AssertionError, ),
              args=(campaign_in_db['id'], token_first),
              kwargs={'count': 2})
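Most of the test snippets in this listing poll an eventually-consistent API by handing redo.retry a function that raises AssertionError until the expected state appears. A minimal, self-contained sketch of that idiom, assuming only the redo package; the fetch_blast_count poller and the expected count are made up for illustration:

from redo import retry

_state = {"blasts": 0}

def fetch_blast_count(expected):
    """Hypothetical poller: pretend the backend catches up after a few calls."""
    _state["blasts"] += 1
    assert _state["blasts"] >= expected, "blast not created yet"
    return _state["blasts"]

# Re-invoke the poller up to 30 times, sleeping 3 seconds between attempts
# (sleepscale=1 keeps the delay constant), retrying only on AssertionError.
count = retry(fetch_blast_count,
              attempts=30,
              sleeptime=3,
              sleepscale=1,
              retry_exceptions=(AssertionError,),
              args=(2,))
print(count)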
Example 2
    def test_schedule_a_campaign_with_user_from_same_domain(
            self, smartlist_first, campaign_in_db, talent_pool, token_first,
            token_same_domain, candidate_device_first):
        """
        In this test, we will schedule a campaign using a different user's auth token, where the user is from the
        same domain as the actual owner of the campaign. So we expect that the response will be OK and the campaign
        will be scheduled.
        """

        campaign_id = campaign_in_db['id']
        data = generate_campaign_schedule_data(frequency_id=Frequency.DAILY)
        response = schedule_campaign(campaign_id,
                                     data,
                                     token_same_domain,
                                     expected_status=(codes.OK, ))
        assert 'task_id' in response
        assert 'message' in response
        task_id = response['task_id']
        assert task_id

        # campaign = get_campaign(campaign_in_db['id'], token_first)['campaign']
        # match_schedule_data(data, campaign)

        # There should be a campaign schedule activity
        assert_activity(Activity.MessageIds.CAMPAIGN_SCHEDULE,
                        campaign_in_db['id'], 'push_campaign',
                        token_same_domain)

        retry(get_blasts,
              sleeptime=3,
              attempts=30,
              sleepscale=1,
              retry_exceptions=(AssertionError, ),
              args=(campaign_id, token_first),
              kwargs={'count': 1})
Example 3
    def test_reschedule_a_campaign_with_user_from_same_domain(
            self, token_first, token_same_domain, campaign_in_db,
            schedule_a_campaign):
        """
        In this test, we will reschedule a campaign using a different user's auth token, where the user is from the
        same domain as the actual owner of the campaign. So we expect that the response will be OK and the campaign
        will be rescheduled.
        """
        # schedule_a_campaign fixture schedules a job. So, wait for next 10 seconds for the job to run and increase
        # blast count
        sleep(10)
        data = generate_campaign_schedule_data(frequency_id=Frequency.DAILY)
        response = reschedule_campaign(campaign_in_db['id'],
                                       data,
                                       token_same_domain,
                                       expected_status=(codes.OK, ))
        assert 'task_id' in response
        assert 'message' in response
        task_id = response['task_id']
        assert task_id

        # campaign = get_campaign(campaign_in_db['id'], token_first)['campaign']
        # match_schedule_data(data, campaign)
        retry(get_blasts,
              attempts=30,
              sleepscale=1,
              sleeptime=3,
              retry_exceptions=(AssertionError, ),
              args=(campaign_in_db['id'], token_first),
              kwargs={'count': 2})
Example 4
    def test_schedule_a_campaign_with_valid_data(self, smartlist_first,
                                                 campaign_in_db, talent_pool,
                                                 token_first,
                                                 candidate_device_first):
        """
        In this test, we will schedule a campaign with all valid data and it should return an OK response and campaign
        should be scheduled.
        """
        data = generate_campaign_schedule_data()
        response = schedule_campaign(campaign_in_db['id'],
                                     data,
                                     token_first,
                                     expected_status=(codes.OK, ))
        assert 'task_id' in response
        assert 'message' in response
        task_id = response['task_id']
        assert task_id

        # There should be a campaign schedule activity
        assert_activity(Activity.MessageIds.CAMPAIGN_SCHEDULE,
                        campaign_in_db['id'], 'push_campaign', token_first)

        # campaign = get_campaign(campaign_in_db['id'], token_first)['campaign']
        # match_schedule_data(data, campaign)
        retry(get_blasts,
              sleeptime=3,
              attempts=20,
              sleepscale=1,
              retry_exceptions=(AssertionError, ),
              args=(campaign_in_db['id'], token_first),
              kwargs={'count': 1})
Example 5
def smartlist_with_two_candidates_with_no_device_associated(
        request, token_first, user_first, candidate_first,
        candidate_same_domain, talent_pipeline):
    """
    This fixture creates a smartlist that contains two candidates from domain_first.
    Both candidates do not have any push device associated with them.
    :param request: request object
    :param candidate_first: candidate object
    :param candidate_same_domain: candidate object
    :param token_first: access token for user_first
    :return: smartlist object (dict)
    """
    candidate_ids = [candidate_first['id'], candidate_same_domain['id']]
    time.sleep(10)
    smartlist = create_smartlist(candidate_ids, talent_pipeline['id'],
                                 token_first)['smartlist']
    smartlist_id = smartlist['id']
    retry(get_smartlist_candidates,
          sleeptime=3,
          attempts=50,
          sleepscale=1,
          retry_exceptions=(AssertionError, ),
          args=(smartlist_id, token_first),
          kwargs={'count': 2})
    return smartlist
Example 6
    def fetch_history(self, context, data):
        try:
            retry(
                self._fetch_history,
                sleeptime=5,
                retry_exceptions=(ccxt_errors.RequestTimeout,),
                args=(context, data),
                cleanup=lambda: self.log.warning("CCXT request timed out, retrying..."),
            )
            return True

        except ccxt_errors.ExchangeNotAvailable:
            self.log.error(
                f"{self.exchange} API is currently unavailable, skipping trading step"
            )
            return False

        except ccxt_errors.DDoSProtection:
            self.log.error("Hit Rate limit, skipping trade step")
            return False

        except SystemExit:
            self.log.warning("Not retrying history due to algo exit")
            return False

        except Exception:
            self.log.error("Could not fetch latest history", exc_info=True)
            return False
Example 7
    def add_chain(self):
        from redo import retry
        import requests
        import pem

        def get_chain():
            r = requests.get(self.chain_url)
            r.raise_for_status()
            return r.text

        chain = retry(get_chain)

        req = {"chain": []}
        chain = pem.parse(chain)
        for i in range(len(chain)):
            cert = crypto.load_certificate(crypto.FILETYPE_PEM, str(chain[i]))
            der = crypto.dump_certificate(crypto.FILETYPE_ASN1, cert)
            req["chain"].append(base64.b64encode(der))

        def post_chain():
            r = requests.post(self.log_url + '/ct/v1/add-chain', json=req)
            r.raise_for_status()
            return r.json()

        resp = retry(post_chain)
        sct = SignedCertificateTimestamp(resp)
        self.write_to_file(self.sct_filename, sct.to_rfc6962())
Example 8
def download_file_from_url(url, local_dest, extract=False):
    """Receive a file URL and download it, e.g. for the hostutils tooltool manifest
    the url received would be formatted like this:
      config/tooltool-manifests/linux64/hostutils.manifest"""
    if os.path.exists(local_dest):
        LOG.info("file already exists at: %s" % local_dest)
        if not extract:
            return True
    else:
        LOG.info("downloading: %s to %s" % (url, local_dest))
        try:
            retry(urlretrieve, args=(url, local_dest), attempts=3, sleeptime=5)
        except Exception:
            LOG.error("Failed to download file: %s" % local_dest,
                      exc_info=True)
            if os.path.exists(local_dest):
                # delete partial downloaded file
                os.remove(local_dest)
            return False

    if not extract:
        return os.path.exists(local_dest)

    typ = archive_type(local_dest)
    if typ is None:
        LOG.info("Not able to determine archive type for: %s" % local_dest)
        return False

    extract_archive(local_dest, os.path.dirname(local_dest), typ)
    return True
Example 9
    def consume(self):
        """ Start the consumer and read messages from Kafka, sending self.batch_size messages
        to Splunk in a perpetual loop. Attempt incremental backoff self.retry_attempts
        times if any batch of messages gets a non-200 status code; otherwise give up and exit.
        """

        self.consumer = self.getConsumer(self.client.topics[self.topic])

        # create splunk hec instance
        splunk_hec = hec(self.splunk_server, self.splunk_hec_port,
                         self.splunk_hec_channel, self.splunk_hec_token,
                         self.splunk_sourcetype, self.splunk_source,
                         self.use_https, self.verify_ssl, self.use_compression,
                         self.compresslevel)
        while (True):
            m = self.consumer.consume()

            # Append messages to list until we've hit self.batch_size
            if (len(self.messages) <= self.batch_size):
                self.messages.append(m.value)

            # Send messages to Splunk HEC
            if (len(self.messages) == self.batch_size):
                retry(self.sendToSplunk,
                      attempts=self.retry_attempts,
                      sleeptime=self.sleeptime,
                      max_sleeptime=self.max_sleeptime,
                      sleepscale=self.sleepscale,
                      jitter=self.jitter,
                      retry_exceptions=(Exception, ),
                      args=(splunk_hec, ))
Example 10
    def cancel_order(self, order_param, symbol=None, params={}):
        """Cancel an open order.

        Parameters
        ----------
        order_param : str or Order
            The order_id or order object to cancel.
        symbol: str
            The tradingPair symbol
        params: dict, optional
            Extra parameters to pass to the exchange
        """
        log.info("canceling an order")
        order_id = order_param
        if isinstance(order_param, zp.Order) or \
                isinstance(order_param, Order):
            order_id = order_param.id

        if not self.simulate_orders:
            exchange_name = self.blotter.orders[order_id].asset.exchange
            exchange = self.exchanges[exchange_name]
            retry(action=exchange.cancel_order,
                  attempts=self.attempts['cancel_order_attempts'],
                  sleeptime=self.attempts['retry_sleeptime'],
                  retry_exceptions=(ExchangeRequestError, ),
                  cleanup=lambda: log.warn(
                      'attempting to cancel the order again'),
                  args=(order_id, symbol, params))
            self.blotter.cancel(order_id)
        else:
            self.blotter.cancel(order_id)
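Several of these snippets (e.g. the cancel_order one above) also pass a cleanup callable, which redo.retry invokes after a failed attempt before trying again, typically to log or reset state. A minimal sketch of that behavior, assuming only the standard library and redo; the flaky helper is made up for illustration:

import logging
from redo import retry

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("retry-demo")

calls = []

def flaky():
    """Hypothetical action that fails twice, then succeeds."""
    calls.append(1)
    if len(calls) < 3:
        raise RuntimeError("transient failure")
    return "ok"

# cleanup runs between failed attempts; jitter must not exceed sleeptime,
# so both are set to 0 here to keep the demo fast.
result = retry(flaky,
               attempts=5,
               sleeptime=0,
               jitter=0,
               retry_exceptions=(RuntimeError,),
               cleanup=lambda: log.warning("attempt failed, retrying"))
print(result)  # -> "ok" on the third call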
Example 11
def migration(args):
    limit = int(args.limit)
    offset = 0
    url = URL % (limit, offset)
    response = retry(requests.get, args=(url, )).json()
    datasets = response['result']
    while len(datasets) > 0:
        for data in datasets:
            Testjob = Testjobs(data['slave'], data['result'],
                               data['build_system_type'], data['duration'],
                               data['platform'], data['buildtype'],
                               data['testtype'], data['bugid'], data['branch'],
                               data['revision'], data['date'],
                               data['failure_classification'],
                               data['failures'])
            try:
                session.add(Testjob)
                session.commit()

            except Exception as error:
                logging.warning(error)
                session.rollback()

            finally:
                session.close()

        # The process will move forward by set offset
        offset += limit
        url = URL % (limit, offset)
        response = retry(requests.get, args=(url, )).json()
        datasets = response['result']
Example 12
def get_iam_last_access_details(iam, arn):
    '''Retrieves IAM last accessed details for the given user/group/role ARN'''
    job = redo.retry(iam.generate_service_last_accessed_details,
                     attempts=5,
                     sleeptime=5,
                     kwargs={'Arn': arn})
    job_id = job['JobId']
    marker = None
    service_results = []
    tries = 0

    while True:
        result = redo.retry(iam.get_service_last_accessed_details,
                            attempts=5,
                            sleeptime=5,
                            kwargs={'JobId': job_id})
        if result['JobStatus'] == 'IN_PROGRESS':
            print("Awaiting job")
        elif result['JobStatus'] == 'FAILED':
            raise Exception(f"Could not get access information for {arn}")
        else:
            service_results.extend(paginate_access_details(job_id, result))
            break
        time.sleep(10)
    return service_results
Example 13
    def add_chain(self):
        from redo import retry
        import requests
        import pem

        def get_chain():
            r = requests.get(self.chain_url)
            r.raise_for_status()
            return r.text

        chain = retry(get_chain)

        req = {"chain": []}
        chain = pem.parse(chain)
        for i in range(len(chain)):
            cert = crypto.load_certificate(crypto.FILETYPE_PEM, str(chain[i]))
            der = crypto.dump_certificate(crypto.FILETYPE_ASN1, cert)
            req["chain"].append(base64.b64encode(der))

        def post_chain():
            r = requests.post(self.log_url + '/ct/v1/add-chain', json=req)
            r.raise_for_status()
            return r.json()

        resp = retry(post_chain)
        sct = SignedCertificateTimestamp(resp)
        self.write_to_file(self.sct_filename, sct.to_rfc6962())
Example 14
    def refresh_antivirus(self):
        self.info("Refreshing clamav db...")
        try:
            redo.retry(lambda: sh.freshclam("--stdout", "--verbose", _timeout=300, _err_to_out=True))
            self.info("Done.")
        except sh.ErrorReturnCode:
            self.warning("Freshclam failed, skipping DB update")
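In the refresh_antivirus snippet the keyword-heavy sh.freshclam(...) call is wrapped in a lambda so that retry has a zero-argument callable to re-invoke. An equivalent formulation passes the arguments through retry's own args/kwargs parameters; a small sketch with a hypothetical flaky_call helper:

from redo import retry

def flaky_call(path, timeout=30):
    """Hypothetical stand-in for a call that may fail transiently."""
    return path

# These two calls are equivalent ways of letting retry() re-invoke the helper:
retry(lambda: flaky_call("/tmp/db", timeout=300), attempts=3, sleeptime=5)
retry(flaky_call, args=("/tmp/db",), kwargs={"timeout": 300}, attempts=3, sleeptime=5)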
Example 15
    def cancel_order(self, order_param, exchange_name, symbol=None, params={}):
        """Cancel an open order.

        Parameters
        ----------
        order_param : str or Order
            The order_id or order object to cancel.

        exchange_name: str
            The name of exchange to cancel the order in
        symbol: str
            The tradingPair symbol
        params: dict, optional
            Extra parameters to pass to the exchange
        """
        exchange = self.exchanges[exchange_name]

        order_id = order_param
        if isinstance(order_param, zp.Order):
            order_id = order_param.id

        retry(action=exchange.cancel_order,
              attempts=self.attempts['cancel_order_attempts'],
              sleeptime=self.attempts['retry_sleeptime'],
              retry_exceptions=(ExchangeRequestError, ),
              cleanup=lambda: log.warn('cancelling order again.'),
              args=(order_id, symbol, params))
Example 16
    def test_campaign_send_to_smartlist_with_two_candidates_with_and_without_push_device(self, token_first,
                                                    campaign_with_two_candidates_with_and_without_push_device):
        """
        - This tests the endpoint /v1/push-campaigns/:id/send
        In this test I want to test the scenario where a push campaign is sent to multiple candidates and
        some, but not all, of them have no push device associated with them. In that case the API should not raise
        an InvalidUsage error; instead, the number of sends should equal the number of candidates
        that do have devices associated with them.
        """
        campaign_id = campaign_with_two_candidates_with_and_without_push_device['id']
        send_campaign(campaign_id, token_first, expected_status=(codes.OK,))

        # Assert campaign send activity
        assert_activity(Activity.MessageIds.CAMPAIGN_SEND, campaign_id, 'push_campaign', token_first)

        response = retry(get_blasts, sleeptime=SLEEP_INTERVAL, attempts=RETRY_ATTEMPTS * 2, sleepscale=1,
                         retry_exceptions=(AssertionError,), args=(campaign_id, token_first), kwargs={'count': 1})
        blasts = response['blasts']
        blast_id = blasts[0]['id']

        # There should be only one send because second candidate in smartlist does not have any push device associated
        # with him.
        response = retry(get_blast_sends, sleeptime=SLEEP_INTERVAL, attempts=RETRY_ATTEMPTS * 2, sleepscale=1,
                         retry_exceptions=(AssertionError,), args=(blast_id, campaign_id, token_first),
                         kwargs={'count': 1})
        assert len(response['sends']) == 1
Example 17
    def check_url(self, session, url):
        from redo import retry
        try:
            from urllib.parse import urlparse
        except ImportError:
            # Python 2
            from urlparse import urlparse

        mozilla_locations = [
            'download-installer.cdn.mozilla.net',
            'download.cdn.mozilla.net',
            'download.mozilla.org',
            'archive.mozilla.org',
        ]

        def do_check_url():
            self.log("Checking {}".format(url))
            r = session.head(url, verify=True, timeout=10, allow_redirects=True)
            try:
                r.raise_for_status()
            except Exception:
                self.error("FAIL: {}, status: {}".format(url, r.status_code))
                raise

            final_url = urlparse(r.url)
            if final_url.scheme != 'https':
                self.error('FAIL: URL scheme is not https: {}'.format(r.url))

            if final_url.netloc not in mozilla_locations:
                self.error('FAIL: host not in allowed locations: {}'.format(r.url))

        retry(do_check_url, sleeptime=3, max_sleeptime=10, attempts=3)
Example 18
def submit_locale(task, config, auth0_secrets, backend_version):
    """Submit a release blob to balrog."""
    upstream_artifacts = get_upstream_artifacts(task)

    # Read the manifest from disk
    manifest = get_manifest(config, upstream_artifacts)

    suffixes = task["payload"].get("suffixes", [""])

    for e in manifest:
        for suffix in suffixes:
            # Get release metadata from manifest
            submitter, release = create_locale_submitter(
                e,
                suffix,
                auth0_secrets,
                config,
                backend_version=backend_version)
            # Connect to balrog and submit the metadata
            # Going back to the original number of attempts so that we avoid sleeping too much in between
            # retries to get Out-of-memory in the GCP workers. Until we figure out what's bumping the spike
            # in memory usage from 130 -> ~400 Mb, let's keep this as it was, historically
            retry(lambda: submitter.run(**release),
                  jitter=5,
                  sleeptime=10,
                  max_sleeptime=30,
                  attempts=10)
Example 19
    def updateWorkloadStateTable(self, action):

        self.currentTime = str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))

        if (action == Orchestrator.ACTION_START) and (self.scalingProfile is not None):
            UpdateExpressionAttr = 'SET Profile= :profile, LastActionTime= :currentTime, LastActionType= :actionType'
            ExpressionAttributeValuesAttr = {
                ':profile': self.scalingProfile,
                ':currentTime': self.currentTime,
                ':actionType': action
            }
        else:
            UpdateExpressionAttr = 'SET LastActionTime= :currentTime, LastActionType= :actionType'
            ExpressionAttributeValuesAttr = {
                ':currentTime': self.currentTime,
                ':actionType': action,
            }

        try:
            retry(self.WorkloadStateTable.update_item, attempts=5, sleeptime=0, jitter=0, kwargs={
                "Key": {
                    'Workload': self.partitionTargetValue,
                },
                "UpdateExpression": UpdateExpressionAttr,
                "ExpressionAttributeValues": ExpressionAttributeValuesAttr
            })
        except Exception as e:
            msg = 'Orchestrator::updateWorkloadStateTable() Exception encountered during DDB update %s -->' % e
            logger.error(msg + str(e))
Example 20
    def refresh_antivirus(self):
        self.info("Refreshing clamav db...")
        try:
            redo.retry(lambda: sh.freshclam(
                "--stdout", "--verbose", _timeout=300, _err_to_out=True))
            self.info("Done.")
        except sh.ErrorReturnCode:
            self.warning("Freshclam failed, skipping DB update")
Example 21
    def testRetrySelectiveExceptionSucceed(self):
        retry(
            _raiseCustomException,
            attempts=2,
            sleeptime=0,
            jitter=0,
            retry_exceptions=(NewError,),
        )
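_raiseCustomException and NewError are defined elsewhere in redo's test module, so they do not appear in the snippet. For the test name ("...Succeed") to make sense, the helper presumably raises NewError only on its first call and returns normally afterwards; a hypothetical reconstruction for context only:

class NewError(Exception):
    """Custom exception type that the test retries on."""

_attempts = {"n": 0}

def _raiseCustomException():
    # Assumed behavior: fail on the first attempt only, so that
    # retry(..., attempts=2, retry_exceptions=(NewError,)) succeeds overall.
    _attempts["n"] += 1
    if _attempts["n"] == 1:
        raise NewError("first attempt fails")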
Example 22
def set_readonly(task, config, auth0_secrets):
    state_updater = create_state_updater(api_root=config["api_root"],
                                         auth0_secrets=auth0_secrets)
    args = [
        task["payload"]["product"].capitalize(), task["payload"]["version"],
        task["payload"]["build_number"]
    ]
    retry(lambda: state_updater.run(*args))
Example 23
    def testRetrySelectiveExceptionSucceed(self):
        retry(
            _raiseCustomException,
            attempts=2,
            sleeptime=0,
            jitter=0,
            retry_exceptions=(NewError, ),
        )
Example 24
def slavealloc_disable(self, machine, loanid):
    try:
        url = furl(current_app.config.get("SLAVEAPI_URL", None))
        url.path.add(machine).add("actions").add("disable")
        loan_bug = Loans.query.get(loanid).bug_id
        postdata = dict(reason="Being loaned on slaveloan bug %s" % loan_bug)
        retry(requests.post, args=(str(url),), kwargs=dict(data=postdata)).json()
        return machine
    except Exception as exc:  # pylint: disable=W0703
        logger.exception(exc)
        self.retry(exc=exc)
Example 25
    def test_campaign_send_to_candidate_with_no_device(self, token_first, campaign_in_db):
        """
        In this test, we will send a campaign to a valid candidate (in the same domain), but the candidate
        has no device associated with him. So no campaign will be sent, which will result in
        one blast and no sends.
        """
        campaign_id = campaign_in_db['id']
        send_campaign(campaign_id, token_first, expected_status=(codes.OK,))

        retry(get_blasts, sleeptime=SLEEP_INTERVAL, attempts=RETRY_ATTEMPTS * 2, sleepscale=1,
              retry_exceptions=(AssertionError,), args=(campaign_id, token_first), kwargs={'count': 1})
        get_and_assert_zero(PushCampaignApiUrl.SENDS % campaign_id, 'sends', token_first)
Example 26
def slavealloc_disable(self, machine, loanid):
    try:
        url = furl(current_app.config.get("SLAVEAPI_URL", None))
        url.path.add(machine).add("actions").add("disable")
        loan_bug = Loans.query.get(loanid).bug_id
        postdata = dict(reason="Being loaned on slaveloan bug %s" % loan_bug)
        retry(requests.post, args=(str(url), ),
              kwargs=dict(data=postdata)).json()
        return machine
    except Exception as exc:  # pylint: disable=W0703
        logger.exception(exc)
        self.retry(exc=exc)
Example 27
    def _scan_files(self):
        """Scan the files we've collected. We do the download and scan concurrently to make
        it easier to have a coherent log afterwards. Uses the venv python."""
        self.info("Refreshing clamav db...")
        redo.retry(lambda: sh.freshclam("--stdout", "--verbose", _timeout=300, _err_to_out=True))
        self.info("Done.")
        external_tools_path = os.path.join(
            os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))),
            'external_tools')
        self.run_command([self.query_python_path(),
                          os.path.join(external_tools_path, 'extract_and_run_command.py'),
                          '-j{}'.format(self.config['scan_parallelization']),
                          'clamscan', '--no-summary', '--', self.dest_dir])
Example 28
def submit_toplevel(task, config, balrog_auth):
    """Push a top-level release blob to balrog."""
    auth = balrog_auth
    partials = {}
    if task['payload'].get('partial_versions'):
        for v in task['payload']['partial_versions'].split(','):
            v = v.strip()  # we have whitespace after the comma
            version, build_number = v.split("build")
            partials[version] = {"buildNumber": build_number}

    suffixes = task['payload'].get('update_line', {}).keys() or ['']

    for suffix in suffixes:
        creator = create_creator(
            api_root=config['api_root'],
            auth=auth,
            dummy=config['dummy'],
            suffix=task['payload'].get('blob_suffix', '') + suffix,
            complete_mar_filename_pattern=task['payload'].get(
                'complete_mar_filename_pattern'),
            complete_mar_bouncer_product_pattern=task['payload'].get(
                'complete_mar_bouncer_product_pattern'),
        )

        retry(lambda: creator.run(
            appVersion=task['payload']['app_version'],
            productName=task['payload']['product'].capitalize(),
            version=task['payload']['version'],
            buildNumber=task['payload']['build_number'],
            updateChannels=task['payload']['channel_names'],
            ftpServer=task['payload']['archive_domain'],
            bouncerServer=task['payload']['download_domain'],
            enUSPlatforms=task['payload']['platforms'],
            hashFunction='sha512',
            partialUpdates=partials,
            requiresMirrors=task['payload']['require_mirrors'],
            updateLine=task['payload'].get('update_line', {}).get(suffix),
        ))

    pusher = create_pusher(
        api_root=config['api_root'],
        auth=auth,
        dummy=config['dummy'],
        suffix=task['payload'].get('blob_suffix', ''),
    )
    retry(lambda: pusher.run(
        productName=task['payload']['product'].capitalize(),
        version=task['payload']['version'],
        build_number=task['payload']['build_number'],
        rule_ids=task['payload']['rules_to_update'],
    ))
Example 29
    def test_campaign_send_to_smartlist_with_two_candidates_with_no_push_device(self, token_first,
                                                    campaign_with_two_candidates_with_no_push_device_associated):
        """
        - This tests the endpoint /v1/push-campaigns/:id/send
        In this test I want to test the scenario where a push campaign is sent to multiple candidates
        and no candidate has a push device associated with them. The API should return an OK response but no
        campaign will be sent, i.e. blasts = 1, sends = 0.
        """
        campaign_id = campaign_with_two_candidates_with_no_push_device_associated['id']
        send_campaign(campaign_id, token_first, expected_status=(codes.OK,))

        retry(get_blasts, sleeptime=SLEEP_INTERVAL, attempts=RETRY_ATTEMPTS * 2, sleepscale=1,
              retry_exceptions=(AssertionError,), args=(campaign_id, token_first), kwargs={'count': 1})
        get_and_assert_zero(PushCampaignApiUrl.SENDS % campaign_id, 'sends', token_first)
Example 30
    def wait_for_started(self):
        # late imports because we need the jenkins virtualenv to be activated
        # (this is done in the constructor)
        import redo
        import requests

        session = requests.Session()

        def wait_for_jenkins():
            if not session.get('http://localhost:8080').status_code == 200:
                raise Exception('Jenkins did not start successfully.')

        redo.retry(wait_for_jenkins, sleeptime=0.5, jitter=0, sleepscale=1,
                   attempts=120)
Example 31
    def test_send_campaign_to_invalid_email_address(self, access_token_first,
                                                    user_first,
                                                    talent_pipeline):
        """
        In this test, we will send an email campaign to one candidate with an invalid email address.
        After the bounce, this email will be marked as bounced, and when we then try to send this campaign
        through the API, no email campaign will be sent, because only one candidate is associated with this
        campaign and his email address has been marked as Bounced.
        """
        with app.app_context():
            campaign = create_email_campaign_in_db(user_first.id)
            # create candidate
            email_campaign_blast, smartlist_id, candidate_ids = create_campaign_data(
                access_token_first,
                campaign.id,
                talent_pipeline,
                candidate_count=1)

            invalid_email = 'invalid_' + fake.uuid4() + '@gmail.com'
            email = CandidateEmail.get_email_by_candidate_id(candidate_ids[0])
            email.update(address=invalid_email)
            send_campaign_email_to_candidate(campaign,
                                             email,
                                             candidate_ids[0],
                                             blast_id=email_campaign_blast.id)
            retry(assert_is_bounced,
                  sleeptime=3,
                  attempts=100,
                  sleepscale=1,
                  args=(email, ),
                  retry_exceptions=(AssertionError, ))
            blast_url = EmailCampaignApiUrl.BLASTS % campaign.id
            campaign_blasts = CampaignsTestsHelpers.get_blasts_with_polling(
                campaign,
                timeout=300,
                blasts_url=blast_url,
                access_token=access_token_first)
            campaign_blast = campaign_blasts[0]
            assert campaign_blast['bounces'] == 1

            # Since there is no candidate associated with campaign with valid email, so no more blasts would be created
            response = requests.post(EmailCampaignApiUrl.SEND % campaign.id,
                                     headers=dict(Authorization='Bearer %s' %
                                                  access_token_first))
            assert response.status_code == requests.codes.OK
            CampaignsTestsHelpers.assert_campaign_blasts(
                campaign, 1, access_token=access_token_first, timeout=300)
            # without sleep, finalizer deletes campaign object and code fails to find this object when bounce occurs
            time.sleep(10)
Example 32
    def test_search_for_candidate_in_pipeline(self, user_first,
                                              access_token_first, talent_pool):
        """
        Test: Use Pipeline search params to search for a candidate
        """
        user_first.role_id = Role.get_by_name('DOMAIN_ADMIN').id
        db.session.commit()
        skill_name = fake.word()

        # Create multiple candidates
        data = FakeCandidatesData.create(talent_pool=talent_pool,
                                         count=16,
                                         skill_name=skill_name)
        candidate_ids = create_candidates_from_candidate_api(
            access_token_first, data, return_candidate_ids_only=True)
        number_of_talent_pipelines = 15
        candidate_id = candidate_ids[-1]

        # Add Pipelines
        pipeline_data = {
            "talent_pipelines": [{
                "talent_pool_id":
                talent_pool.id,
                "name":
                'testing_{}'.format(str(fake.uuid4())[:5]),
                "date_needed":
                "2017-11-30",
                "search_params": {
                    "skills": [skill_name]
                }
            } for _ in range(number_of_talent_pipelines)]
        }

        create_resp = send_request('post', PIPELINE_URL, access_token_first,
                                   pipeline_data)
        print response_info(create_resp)

        # Ensure talent pipelines were created
        get_resp = send_request('get', PIPELINE_URL, access_token_first)
        assert set([tp['id'] for tp in get_resp.json()['talent_pipelines']
                    ]).issubset(create_resp.json()['talent_pipelines'])
        # Assert on results
        retry(assert_result,
              sleeptime=3,
              attempts=10,
              sleepscale=1,
              args=(candidate_id, access_token_first),
              kwargs=dict(expected_count=number_of_talent_pipelines),
              retry_exceptions=(AssertionError, ))
Example 33
def submit_toplevel(task, config, auth0_secrets, backend_version):
    """Push a top-level release blob to balrog."""
    partials = {}
    if task["payload"].get("partial_versions"):
        for v in task["payload"]["partial_versions"].split(","):
            v = v.strip()  # we have whitespace after the comma
            version, build_number = v.split("build")
            partials[version] = {"buildNumber": build_number}

    suffixes = list(task["payload"].get("update_line", {}).keys()) or [""]

    for suffix in suffixes:
        creator = create_creator(
            api_root=config["api_root"],
            auth0_secrets=auth0_secrets,
            dummy=config["dummy"],
            suffix=task["payload"].get("blob_suffix", "") + suffix,
            complete_mar_filename_pattern=task["payload"].get(
                "complete_mar_filename_pattern"),
            complete_mar_bouncer_product_pattern=task["payload"].get(
                "complete_mar_bouncer_product_pattern"),
            backend_version=backend_version,
        )

        retry(lambda: creator.run(
            appVersion=task["payload"]["app_version"],
            productName=task["payload"]["product"].capitalize(),
            version=task["payload"]["version"],
            buildNumber=task["payload"]["build_number"],
            updateChannels=task["payload"]["channel_names"],
            ftpServer=task["payload"]["archive_domain"],
            bouncerServer=task["payload"]["download_domain"],
            enUSPlatforms=task["payload"]["platforms"],
            hashFunction="sha512",
            partialUpdates=partials,
            requiresMirrors=task["payload"]["require_mirrors"],
            updateLine=task["payload"].get("update_line", {}).get(suffix),
        ))

    pusher = create_pusher(api_root=config["api_root"],
                           auth0_secrets=auth0_secrets,
                           dummy=config["dummy"],
                           suffix=task["payload"].get("blob_suffix", ""))
    retry(lambda: pusher.run(
        productName=task["payload"]["product"].capitalize(),
        version=task["payload"]["version"],
        build_number=task["payload"]["build_number"],
        rule_ids=task["payload"]["rules_to_update"],
    ))
Example 34
def submit_locale(task, config, auth0_secrets):
    """Submit a release blob to balrog."""
    upstream_artifacts = get_upstream_artifacts(task)

    # Read the manifest from disk
    manifest = get_manifest(config, upstream_artifacts)

    suffixes = task['payload'].get('suffixes', [''])

    for e in manifest:
        for suffix in suffixes:
            # Get release metadata from manifest
            submitter, release = create_locale_submitter(e, suffix, auth0_secrets, config)
            # Connect to balrog and submit the metadata
            retry(lambda: submitter.run(**release), jitter=5, sleeptime=10, max_sleeptime=30, attempts=20)
Example 35
        def worker(item):
            source, destination = item

            def copy_key():
                source_key = bucket.get_key(source)
                dest_key = bucket.get_key(destination)
                # According to http://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html
                # S3 key MD5 is represented as ETag, except when objects are
                # uploaded using multipart method. In this case objects's ETag
                # is constructed using its MD5, minus symbol, and number of
                # part. See http://stackoverflow.com/questions/12186993/what-is-the-algorithm-to-compute-the-amazon-s3-etag-for-a-file-larger-than-5gb#answer-19896823
                source_md5 = source_key.etag.split("-")[0]
                if dest_key:
                    dest_md5 = dest_key.etag.split("-")[0]
                else:
                    dest_md5 = None

                if not dest_key:
                    self.info("Copying {} to {}".format(source, destination))
                    bucket.copy_key(destination, self.config["bucket_name"],
                                    source)
                elif source_md5 == dest_md5:
                    self.warning(
                        "{} already exists with the same content ({}), skipping copy".format(
                            destination, dest_md5))
                else:
                    self.fatal(
                        "{} already exists with the different content (src ETag: {}, dest ETag: {}), aborting".format(
                            destination, source_key.etag, dest_key.etag))

            return retry(copy_key, sleeptime=5, max_sleeptime=60,
                         retry_exceptions=(S3CopyError, S3ResponseError))
Example 36
def query_pushes_by_pushid_range(repo_url, start_id, end_id, version=VERSION, return_revision_list=False):
    """
    Return an ordered list of pushes (oldest first).

    repo_url               - represents the URL to clone a repo
    start_id               - from which pushid to start with (oldest)
    end_id                 - from which pushid to end with (most recent)
    version                - version of json-pushes to use (see docs)
    return_revision_list   - return a list of revisions if it's True
    """
    push_list = []
    url = "%s?startID=%s&endID=%s&version=%s&tipsonly=1" % (
        JSON_PUSHES % {"repo_url": repo_url},
        start_id - 1,  # off by one to compensate for pushlog as it skips start_id
        end_id,
        version,
    )
    LOG.debug("About to fetch %s" % url)
    req = retry(requests.get, args=(url,))
    pushes = req.json()["pushes"]

    for push_id in sorted(pushes.keys()):
        # Querying by push ID is preferred because date ordering is
        # not guaranteed (due to system clock skew)
        # We can interact with self-serve with the 12 char representation
        push_list.append(Push(push_id=push_id, push_info=pushes[push_id]))
    if return_revision_list:
        return _pushes_to_list(push_list)

    return push_list
Example 37
def update_runnableapi():
    """
    Use it to update runnablejobs.json file.
    """
    url = "https://index.taskcluster.net/v1/task/gecko.v2.%s.latest.firefox.decision/"
    latest_task = retry(requests.get, args=(url % "mozilla-inbound", ),
                        kwargs={'headers': {'accept-encoding': 'json'}, 'verify': True}).json()
    task_id = latest_task['taskId']

    # The format of expires is like 2017-07-04T22:13:23.248Z and we only want 2017-07-04 part
    expires = latest_task['expires'].split('T')[0]
    time_tuple = datetime.datetime.strptime(expires, "%Y-%m-%d").timetuple()
    new_timestamp = time.mktime(time_tuple)
    path = ROOT_DIR + '/runablejobs.json'

    # we do nothing if the timestamp of runablejobs.json is equal with the latest task
    # otherwise we download and update it
    if os.path.isfile(path):
        with open(path, 'r+') as data:
            # read the timestamp of this task from the json file
            oldtime = json.loads(data.read())['meta']['timetamp']
        if oldtime == new_timestamp:
            print "The runnable json file is latest already."
            return
        else:
            print "It's going to update your runnable jobs data."
            download_runnable_jobs(new_timestamp, task_id)
    else:
        print "It's going to help you download the runnable jobs file."
        download_runnable_jobs(new_timestamp, task_id)
Example 38
def get_changed_files(repository, revision):
    """
    Get the set of files changed in the push headed by the given revision.
    Responses are cached, so multiple calls with the same arguments are OK.
    """
    key = repository, revision
    if key not in _cache:
        url = '%s/json-automationrelevance/%s' % (repository.rstrip('/'), revision)
        logger.debug("Querying version control for metadata: %s", url)

        def get_automationrelevance():
            response = requests.get(url, timeout=5)
            return response.json()
        contents = retry(get_automationrelevance, attempts=2, sleeptime=10)

        logger.debug('{} commits influencing task scheduling:'
                     .format(len(contents['changesets'])))
        changed_files = set()
        for c in contents['changesets']:
            logger.debug(" {cset} {desc}".format(
                cset=c['node'][0:12],
                desc=c['desc'].splitlines()[0].encode('ascii', 'ignore')))
            changed_files |= set(c['files'])

        _cache[key] = changed_files
    return _cache[key]
Example 39
def query_push_by_revision(repo_url, revision, full=False, return_revision_list=False):
    """
    Return a dictionary with meta-data about a push including:

        * changesets
        * date
        * user
    repo_url               - represents the URL to clone a rep
    revision               - the revision used to set the query range
    full                   - query whole information of a push if it's True
    return_revision_list   - return a list of revisions if it's True
    """
    url = "%s?changeset=%s&tipsonly=1" % (JSON_PUSHES % {"repo_url": repo_url}, revision)
    if full:
        url += "&full=1"
    LOG.debug("About to fetch %s" % url)
    req = retry(requests.get, args=(url,))
    data = req.json()
    assert len(data) == 1, "We should only have information about one push"

    if not full:
        LOG.debug("Push info: %s" % str(data))
        push_id, push_info = data.popitem()
        push = Push(push_id=push_id, push_info=push_info)
    else:
        LOG.debug("Requesting the info with full=1 can yield too much unnecessary output " "to debug anything properly")
    if return_revision_list:
        return push.changesets[0].node

    return push
Example 40
    def worker(item):
        source, destination = item

        def copy_key():
            if destination in to_keys_checksums:
                # compare md5
                if from_keys_checksums[source] != to_keys_checksums[
                        destination]:
                    raise ScriptWorkerTaskException(
                        "{} already exists with different content "
                        "(src etag: {}, dest etag: {}), aborting".format(
                            destination, from_keys_checksums[source],
                            to_keys_checksums[destination]))
                else:
                    log.warning(
                        "{} already exists with the same content ({}), "
                        "skipping copy".format(destination,
                                               to_keys_checksums[destination]))
            else:
                log.info("Copying {} to {}".format(source, destination))
                boto_client.copy_object(Bucket=context.bucket_name,
                                        CopySource={
                                            "Bucket": context.bucket_name,
                                            "Key": source
                                        },
                                        Key=destination)

        return retry(copy_key,
                     sleeptime=5,
                     max_sleeptime=60,
                     retry_exceptions=(ClientError, ))
Example 41
def download_file(url, target, retry_sleep=RETRY_SLEEP, attempts=3):
    """Downloads a file, given an URL in the target path.

    The function will attempt several times on failures.
    """
    def _download_file(url, target):
        req = requests.get(url, stream=True, timeout=30)
        target_dir = target.parent.resolve()
        if str(target_dir) != "":
            target_dir.mkdir(exist_ok=True)

        with target.open("wb") as f:
            for chunk in req.iter_content(chunk_size=1024):
                if not chunk:
                    continue
                f.write(chunk)
                f.flush()
        return target

    return retry(
        _download_file,
        args=(url, target),
        attempts=attempts,
        sleeptime=retry_sleep,
        jitter=0,
    )
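target here is treated as a pathlib.Path (it has .parent and .open), so a usage sketch might look like the following; the URL and destination path are illustrative only:

from pathlib import Path

# Up to 3 attempts with a fixed delay between them; a failed attempt
# (e.g. a requests exception) is retried by redo.retry with its default
# retry_exceptions of (Exception,).
saved = download_file("https://example.com/data/archive.tar.gz",
                      Path("downloads/archive.tar.gz"),
                      retry_sleep=10,
                      attempts=3)
print("saved to", saved)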
Example 42
    def order(self, asset, amount, style, order_id=None):
        log.debug('ordering {} {}'.format(amount, asset.symbol))
        if amount == 0:
            log.warn('skipping 0 amount orders')
            return None

        if self.simulate_orders:
            return super(ExchangeBlotter, self).order(
                asset, amount, style, order_id
            )

        else:
            order = retry(
                action=self.exchange_order,
                attempts=self.attempts['order_attempts'],
                sleeptime=self.attempts['retry_sleeptime'],
                retry_exceptions=(ExchangeRequestError,),
                cleanup=lambda: log.warn('Ordering again.'),
                args=(asset, amount, style),
            )

            self.open_orders[order.asset].append(order)
            self.orders[order.id] = order
            self.new_orders.append(order)

            return order.id
Example 43
    def _get_product_uptake(self, tuxedo_server_url, auth,
                            related_product, os):
        from redo import retry
        import requests

        url = get_tuxedo_uptake_url(tuxedo_server_url, related_product, os)
        self.info("Requesting {} from tuxedo".format(url))

        def get_tuxedo_page():
            r = requests.get(url, auth=auth,
                             verify=False, timeout=60)
            r.raise_for_status()
            return r.content

        def calculateUptake(page):
            doc = xml.dom.minidom.parseString(page)
            uptake_values = []

            for element in doc.getElementsByTagName('available'):
                for node in element.childNodes:
                    if node.nodeType == xml.dom.minidom.Node.TEXT_NODE and \
                            node.data.isdigit():
                        uptake_values.append(int(node.data))
            if not uptake_values:
                uptake_values = [0]
            return min(uptake_values)

        page = retry(get_tuxedo_page)
        uptake = calculateUptake(page)
        self.info("Current uptake for {} is {}".format(related_product, uptake))
        return uptake
Example 44
def download_runnable_jobs(new_timestamp, task_id=None):
    if task_id:
        url = TREEHERDER_HOST.format('mozilla-inbound', task_id)
        data = retry(requests.get, args=(url, ), kwargs={'headers': headers}).json()
        if len(data['results']) > 0:
            data['meta'].update({'timetamp': new_timestamp})
            with open(ROOT_DIR + '/runnablejobs.json', 'w') as f:
                json.dump(data, f)
Example 45
    def _update_bouncer_alias(self, tuxedo_server_url, auth,
                              related_product, alias):
        from redo import retry
        import requests

        url = "%s/create_update_alias" % tuxedo_server_url
        data = {"alias": alias, "related_product": related_product}
        self.log("Updating {} to point to {} using {}".format(alias,
                                                              related_product,
                                                              url))

        # Wrap the real call to hide credentials from retry's logging
        def do_update_bouncer_alias():
            r = requests.post(url, data=data, auth=auth,
                              verify=False, timeout=60)
            r.raise_for_status()

        retry(do_update_bouncer_alias)
Example 46
        def worker(item):
            source, destination = item

            self.info("Downloading {} to {}".format(source, destination))
            key = bucket.get_key(source)
            return retry(key.get_contents_to_filename,
                         args=(destination, ),
                         sleeptime=5, max_sleeptime=60,
                         retry_exceptions=(S3CopyError, S3ResponseError))
Example 47
def start_disable_slave(self, machine, loanid):
    try:
        url = furl(current_app.config.get("SLAVEAPI_URL", None))
        url.path.add(machine).add("actions").add("shutdown_buildslave")
        ret = retry(requests.post, args=(str(url),), ).json()
        return (ret["requestid"], machine)
    except Exception as exc:
        logger.exception(exc)
        self.retry(exc=exc)
Example 48
    def query_low_value_tasks(self, project, bbb=False):
        # Request the set of low value tasks from the SETA service.  Low value tasks will be
        # optimized out of the task graph.
        low_value_tasks = []

        if not bbb:
            # we want to get low priority taskcluster jobs
            url = SETA_ENDPOINT % (project, 'taskcluster')
        else:
            # we want low priority buildbot jobs
            url = SETA_ENDPOINT % (project, 'buildbot&priority=5')

        # Try to fetch the SETA data twice, falling back to an empty list of low value tasks.
        # There are 10 seconds between each try.
        try:
            logger.debug("Retrieving low-value jobs list from SETA")
            response = retry(requests.get, attempts=2, sleeptime=10,
                             args=(url, ),
                             kwargs={'timeout': 60, 'headers': ''})
            task_list = json.loads(response.content).get('jobtypes', '')

            if type(task_list) == dict and len(task_list) > 0:
                if type(task_list.values()[0]) == list and len(task_list.values()[0]) > 0:
                    low_value_tasks = task_list.values()[0]
                    # bb job types return a list instead of a single string,
                    # convert to a single string to match tc tasks format
                    if type(low_value_tasks[0]) == list:
                        low_value_tasks = [self._get_task_string(x) for x in low_value_tasks]

            # ensure no build tasks slipped in, we never want to optimize out those
            low_value_tasks = [x for x in low_value_tasks if 'build' not in x.lower()]

        # In the event of request times out, requests will raise a TimeoutError.
        except exceptions.Timeout:
            logger.warning("SETA timeout, we will treat all test tasks as high value.")

        # In the event of a network problem (e.g. DNS failure, refused connection, etc),
        # requests will raise a ConnectionError.
        except exceptions.ConnectionError:
            logger.warning("SETA connection error, we will treat all test tasks as high value.")

        # In the event of the rare invalid HTTP response(e.g 404, 401),
        # requests will raise an HTTPError exception
        except exceptions.HTTPError:
            logger.warning("We got bad Http response from ouija,"
                           " we will treat all test tasks as high value.")

        # We just print the error out as a debug message if we failed to catch the exception above
        except exceptions.RequestException as error:
            logger.warning(error)

        # When we get invalid JSON (i.e. 500 error), it results in a ValueError (bug 1313426)
        except ValueError as error:
            logger.warning("Invalid JSON, possible server error: {}".format(error))

        return low_value_tasks
Example 49
def format_in_table(active_jobs, master):
    results = {}
    sum_removed = 0
    sum_remaining = 0

    data = retry(requests.get, args=('http://alertmanager.allizom.org/data/jobnames/', ),
                 kwargs={'headers': headers,
                         'verify': True}).json()
    running_jobs = data['results']

    for jobtype in active_jobs:
        key = "%s_%s" % (jobtype[0], jobtype[1])
        if key not in results:
            results[key] = []

        for item in master:
            if item[0] == jobtype[0] and item[1] == jobtype[1]:
                results[key].append(item[2])

    keys = results.keys()
    keys.sort()
    missing_jobs = []
    for key in keys:
        data = results[key]
        data.sort()
        output = ""
        for platform, buildtype, test in active_jobs:
            if "%s_%s" % (platform, buildtype) != key:
                continue

            output += '\t'
            if test in data or '' in data:
                found = False
                for job in running_jobs:
                    if job['name'] == test:
                        output += job['job_type_symbol']
                        found = True
                        break

                if not found:
                    output += '**'
                    missing_jobs.append(test)

                sum_removed += 1
            else:
                output += "--"
                sum_remaining += 1

        print "%s%s" % (key, output)

    if missing_jobs:
        print "** new jobs which need a code: %s" % ','.join(missing_jobs)

    print "Total removed %s" % (sum_removed)
    print "Total remaining %s" % (sum_remaining)
    print "Total jobs %s" % (sum_removed + sum_remaining)
Example 50
        def worker(item):
            source, destination = item

            self.info("Copying {} to {}".format(source, destination))
            return retry(bucket.copy_key,
                         args=(destination,
                               self.config["bucket_name"],
                               source),
                         sleeptime=5, max_sleeptime=60,
                         retry_exceptions=(S3CopyError, S3ResponseError))
Example 51
def retry_get(url, **kwargs):
    """
    More robust `requests.get` equivalent function.

    This is equivalent to the requests.get function, except that
    it will retry the requests call three times in case of HTTPError or
    ConnectionError.
    """
    return redo.retry(get_http_session().get, attempts=3, sleeptime=1,
                      retry_exceptions=(requests.exceptions.HTTPError,
                                        requests.exceptions.ConnectionError),
                      args=(url,), kwargs=kwargs)
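A usage sketch, assuming get_http_session() returns a standard requests.Session (the URL is illustrative); extra keyword arguments such as timeout are forwarded to session.get:

# Three attempts, one second apart, retried only on HTTP/connection errors.
response = retry_get("https://example.com/api/status", timeout=10)
print(response.status_code)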
Example 52
def waitfor_disable_slave(self, data, loanid):
    requestid, machine = data
    try:
        url = furl(current_app.config.get("SLAVEAPI_URL", None))
        url.path.add(machine).add("actions").add("shutdown_buildslave")
        url.args["requestid"] = requestid
        ret = retry(requests.get, args=(str(url),), kwargs=dict()).json()
        if ret["state"] in (0, 1):
            # 0 = PENDING, 1 = RUNNING (3=Failed and 2=Success)
            raise Exception("Continue waiting for disabled slave")
    except Exception as exc:
        self.retry(exc=exc)
Example 53
def query_the_runnablejobs(task_id, repo_name='mozilla-inbound'):
    url = RUNNABLE_API.format(repo_name, task_id)
    try:
        data = retry(requests.get, args=(url, ), kwargs={'headers': HEADERS}).json()
        if data:
            # A lot of code components still rely on the file being on disk
            with open(get_runnable_jobs_path(), 'w') as f:
                json.dump(data, f, indent=2, sort_keys=True)

        return data
    except Exception:
        LOG.warning("We failed to get runnablejobs via %s" % url)
        return None
Ejemplo n.º 54
0
    def query_low_value_tasks(self, project):
        # Request the set of low value tasks from the SETA service.  Low value tasks will be
        # optimized out of the task graph.
        if project not in SETA_PROJECTS:
            logger.debug("SETA is not enabled for project `{}`".format(project))
            return []

        logger.debug("Querying SETA service for low-value tasks on {}".format(project))
        low_value_tasks = []

        url = SETA_ENDPOINT % project
        # Try to fetch the SETA data twice, falling back to an empty list of low value tasks.
        # There are 10 seconds between each try.
        try:
            logger.debug("Retrieving low-value jobs list from SETA")
            response = retry(requests.get, attempts=2, sleeptime=10,
                             args=(url, ),
                             kwargs={'timeout': 5, 'headers': headers})
            task_list = json.loads(response.content).get('jobtypes', '')
            if len(task_list) > 0:
                low_value_tasks = task_list.values()[0]

            # Bug 1315145, disable SETA for tier-1 platforms until backfill is implemented.
            low_value_tasks = [x for x in low_value_tasks if x.find('debug') == -1]
            low_value_tasks = [x for x in low_value_tasks if x.find('asan') == -1]

        # In the event that the request times out, requests will raise a Timeout exception.
        except exceptions.Timeout:
            logger.warning("The SETA server timed out; we will treat all test tasks as high value.")

        # In the event of a network problem (e.g. DNS failure, refused connection, etc.),
        # requests will raise a ConnectionError.
        except exceptions.ConnectionError:
            logger.warning("Failed to connect to the SETA server; we will treat all test tasks as high value.")

        # In the event of a rare invalid HTTP response (e.g. 404, 401),
        # requests will raise an HTTPError exception.
        except exceptions.HTTPError:
            logger.warning("We got a bad HTTP response from ouija;"
                           " we will treat all test tasks as high value.")

        # Log any other request-related error that was not caught by one of the handlers above.
        except exceptions.RequestException as error:
            logger.warning(error)

        # When we get invalid JSON (i.e. 500 error), it results in a ValueError (bug 1313426)
        except ValueError as error:
            logger.warning("Invalid JSON, possible server error: {}".format(error))

        return low_value_tasks
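For context, the parsing above (read 'jobtypes', then take the first value) assumes a SETA payload roughly shaped like the following; the date key and task labels are illustrative only:

# Illustrative response shape assumed by query_low_value_tasks: a single
# date key mapping to the list of low-value task labels.
example_payload = {
    "jobtypes": {
        "2017-01-01": [
            "test-linux64/opt-mochitest-1",
            "test-windows7-32/opt-reftest-3",
        ]
    }
}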
Ejemplo n.º 55
0
    def get_transactions(self, bar_data):
        if self.simulate_orders:
            return super(ExchangeBlotter, self).get_transactions(bar_data)

        else:
            return retry(
                action=self.get_exchange_transactions,
                attempts=self.attempts['get_transactions_attempts'],
                sleeptime=self.attempts['retry_sleeptime'],
                retry_exceptions=(ExchangeRequestError,),
                cleanup=lambda: log.warn(
                    'Fetching exchange transactions again.'
                )
            )
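The cleanup callable passed to retry() above is invoked each time one of retry_exceptions is caught, before the next attempt. A minimal standalone sketch of the same pattern (the URL and retry settings are illustrative):

import requests
from redo import retry

def fetch_transactions():
    return requests.get('https://example.org/api/transactions', timeout=5).json()

# cleanup only logs here; the next attempt follows after the sleep interval.
data = retry(
    fetch_transactions,
    attempts=3,
    sleeptime=2,
    retry_exceptions=(requests.exceptions.RequestException,),
    cleanup=lambda: print('Fetching exchange transactions again.'),
)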
Ejemplo n.º 56
0
def get_raw_data(start_date, end_date):
    if not end_date:
        end_date = datetime.datetime.now()

    if not start_date:
        start_date = end_date - datetime.timedelta(days=SETA_WINDOW)

    url = "http://alertmanager.allizom.org/data/seta/?startDate=%s&endDate=%s" % \
          (start_date.strftime("%Y-%m-%d"), end_date.strftime("%Y-%m-%d"))

    response = retry(requests.get, args=(url, ),
                     kwargs={'headers': headers, 'verify': True})
    data = json.loads(response.content)
    return data['failures']
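A hedged usage sketch; passing None for both bounds selects the last SETA_WINDOW days ending now:

import datetime

# Hypothetical call: explicit one-week window ending today.
end = datetime.datetime.now()
failures = get_raw_data(end - datetime.timedelta(days=7), end)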
Ejemplo n.º 57
0
def valid_revision(repo_url, revision):
    """Verify that a revision exists in the given repository URL."""

    global VALID_CACHE
    if (repo_url, revision) in VALID_CACHE:
        return VALID_CACHE[(repo_url, revision)]

    LOG.debug("Determine if the revision is valid.")
    url = "%s?changeset=%s&tipsonly=1" % (JSON_PUSHES % {"repo_url": repo_url}, revision)
    data = retry(requests.get, args=(url,)).json()
    ret = True

    # A valid revision will return a dictionary with information about exactly one revision
    if len(data) != 1:
        LOG.warning("Revision %s not found on branch %s" % (revision, repo_url))
        ret = False

    VALID_CACHE[(repo_url, revision)] = ret
    return ret
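A hedged usage sketch (repository URL and changeset are illustrative); repeated calls for the same pair are answered from VALID_CACHE:

# Hypothetical call; the changeset id is a placeholder.
if valid_revision('https://hg.mozilla.org/mozilla-central', '4e9446f9e7ba'):
    print('Revision exists; safe to query its pushes.')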
Ejemplo n.º 58
0
        def worker(item):
            source, destination = item

            def copy_key():
                dest_key = bucket.get_key(destination)
                source_key = bucket.get_key(source)
                if not dest_key:
                    self.info("Copying {} to {}".format(source, destination))
                    bucket.copy_key(destination, self.config["bucket_name"],
                                    source)
                elif source_key.etag == dest_key.etag:
                    self.warning(
                        "{} already exists with the same content ({}), skipping copy".format(
                            destination, dest_key.etag))
                else:
                    self.fatal(
                        "{} already exists with the different content (src: {}, dest: {}), aborting".format(
                            destination, source_key.etag, dest_key.etag))

            return retry(copy_key, sleeptime=5, max_sleeptime=60,
                         retry_exceptions=(S3CopyError, S3ResponseError))
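Compared with the shorter variant above, this worker compares etags first, so re-running a copy is idempotent. A hedged sketch of how such a worker might be driven (the pool size and item list are illustrative; the original module may use its own parallelization helper):

# Hypothetical driver: copy several (source, destination) pairs concurrently,
# each one retried independently by the worker defined above.
from multiprocessing.pool import ThreadPool

items = [
    ('pub/firefox/candidates/file-1', 'pub/firefox/releases/file-1'),
    ('pub/firefox/candidates/file-2', 'pub/firefox/releases/file-2'),
]
ThreadPool(processes=4).map(worker, items)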
Ejemplo n.º 59
0
def query_pushes_by_revision_range(
    repo_url, from_revision, to_revision, version=VERSION, tipsonly=True, return_revision_list=False
):
    """
    Return an ordered list of pushes (by date - oldest (starting) first).

    repo_url                - represents the URL to clone a repo
    from_revision           - from which revision to start with (oldest)
    to_revision             - from which revision to end with (newest)
    version                 - version of json-pushes to use (see docs)
    tipsonly                - only return the tip-most push if it's True
    return_revision_list    - return a list of revisions if it's True
    """
    push_list = []
    url = "%s?fromchange=%s&tochange=%s&version=%d" % (
        JSON_PUSHES % {"repo_url": repo_url},
        from_revision,
        to_revision,
        version,
    )

    if tipsonly:
        url += "&tipsonly=1"

    LOG.debug("About to fetch %s" % url)
    req = retry(requests.get, args=(url,))
    pushes = req.json()["pushes"]
    # json-pushes does not include the starting revision
    push_list.append(query_push_by_revision(repo_url, from_revision))

    for push_id in sorted(pushes.keys()):
        # Querying by push ID is preferred because date ordering is
        # not guaranteed (due to system clock skew)
        # We can interact with self-serve with the full char representation
        push_list.append(Push(push_id=push_id, push_info=pushes[push_id]))
    if return_revision_list:
        return _pushes_to_list(push_list)

    return push_list
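A hedged usage sketch (repository URL and revisions are illustrative):

# Hypothetical call: collect every revision between two changesets, oldest first.
revisions = query_pushes_by_revision_range(
    'https://hg.mozilla.org/integration/mozilla-inbound',
    from_revision='4e9446f9e7ba',
    to_revision='8850aa0f5332',
    return_revision_list=True,
)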