コード例 #1
0
def download(job, regex):
    """Grabs platform specific distribution targets from Hudson"""
    url = urllib.urlopen("/".join([HUDSON_ROOT, job, HUDSON_XML_SUFFIX]))
    hudson_xml = url.read()
    hudson_xml = hudson_xml.replace('origin/', 'origin_')
    url.close()
    root = XML(hudson_xml)

    building = root.findtext("./building")
    if building == 'true':
        print '%s build in progress, exiting...' % job
        sys.exit(1)
    revision = root.findtext("./changeSet/revision/revision")
    artifacts = root.findall("./artifact")
    print "Retrieving %s job artifacts from revision: %s" % (job, revision)
    base_url = "/".join([HUDSON_ROOT, job, 'lastSuccessfulBuild/artifact'])
    new_artifacts = list()
    for artifact in artifacts:
        filename = artifact.findtext("fileName")
        if not regex.match(filename):
            continue
        artifact_url = "/".join([base_url, artifact.findtext("relativePath")])
        print "Downloading %s from URL %s" % (filename, artifact_url)
        urllib.urlretrieve(artifact_url , filename)
        new_artifacts.append(filename)
    return [revision, new_artifacts]
コード例 #2
0
    def post(self, request, *args, **kwargs):
        """Handle a Google Checkout notification POST.

        Parses the notification XML, stores the common fields (type,
        timestamp, serial number, order number, backend, cart) on the
        view, logs the notification, then dispatches to the matching
        ``_post_*`` handler.  Responds through the superclass GET handler.
        """
        notify_xml = XML(request.raw_post_data)
        self.notify_type = self._extract_notify_type(notify_xml)
        self.notify_type_const = \
            GoogleOrder.trans_notify_type_const(self.notify_type)

        # Timestamps arrive in UTC; store them converted to local time.
        ts_utc = dt_parse(notify_xml.findtext(xpq_timestamp))
        self.timestamp = self._trans_utc_to_local(ts_utc)

        self.serial_number = notify_xml.get('serial-number')
        self.order_number = long(notify_xml.findtext(xpq_order_number))

        backend_class = get_backend_class(ngc_settings.BACKEND)
        self.backend = backend_class(request, notify_xml=notify_xml)
        self.cart = self.backend.get_cart()

        # if we don't find a cart, we do actually go ahead and continue
        # processing. The idea is the data npo_google_checkout holds should
        # match all of what google checkout holds

        msg_target = "cart '{0}'".format(self.cart) \
                if self.cart else 'unknown cart'
        # BUG FIX: a stray trailing comma after format() made ``msg`` a
        # one-element tuple, so a tuple (not a string) was passed to the
        # logger.
        msg = "GC {0} #{1} received for {2}.".format(
                self.notify_type, self.serial_number, msg_target)
        logger.info(msg, extra={'request': request})

        # notification type-specific handling
        if self.notify_type_const == GoogleOrder.NEW_ORDER_NOTIFY_TYPE:
            self._post_new_order(notify_xml)
        else:
            try:
                order = GoogleOrder.objects.get(number=self.order_number)
            except GoogleOrder.DoesNotExist:
                order = None
            else:
                order.last_notify_type = self.notify_type_const
                order.last_notify_dt = self.timestamp

            if not order:
                # silently ignore notifications for orders we didn't see the
                # new-order-notification for
                pass
            elif self.notify_type_const == GoogleOrder.ORDER_STATE_CHANGE_NOTIFY_TYPE:
                self._post_order_state_change(order, notify_xml)
            elif self.notify_type_const == GoogleOrder.RISK_INFORMATION_NOTIFY_TYPE:
                # NOTE: the handler name carries a historical typo; it is
                # defined elsewhere with this spelling, so it stays as-is.
                self._post_risk_informaiton(order, notify_xml)
            elif self.notify_type_const == GoogleOrder.AUTHORIZATION_AMOUNT_NOTIFY_TYPE:
                self._post_authorization_amount(order, notify_xml)
            elif self.notify_type_const == GoogleOrder.CHARGE_AMOUNT_NOTIFY_TYPE:
                self._post_charge_amount(order, notify_xml)
            else:
                # BUG FIX: corrected 'recieved' typo in the error message.
                msg = "Unrecognized notification '{0}' received". \
                        format(self.notify_type)
                raise RuntimeError(msg)

        return super(NotificationListenerView, self).get(
                request, *args, **kwargs)
コード例 #3
0
	def getAllArtifactVersions(self, strGroupID, strArtifactID):
		"""Return all known versions of groupId/artifactId, sorted ascending.

		Queries the repository server's Lucene GA search and converts each
		hit's version text into a ``deploy_version.version`` object.
		Raises ``Exception`` if the server truncated the search result.
		"""
		atVersions = []

		strPath = self.strUrlLuceneSearchGA % (strGroupID, strArtifactID)
		aucContent = self.tRestDriver.get(self.tServerBaseUrl, strPath)
		tSearchResult = XML(aucContent)

		# The search result must be complete.
		if tSearchResult.findtext('tooManyResults') != 'false':
			raise Exception("Received a truncated search result!")

		# Loop over all results.
		for tNode in tSearchResult.findall('data/artifact'):
			strVersion = tNode.findtext('version')
			# IDIOM FIX: use isinstance directly instead of '==True'.
			if isinstance(strVersion, basestring):
				strVersion = strVersion.strip()
				# 'SNAPSHOT' carries no numeric version; map it to 0.0.0.
				if strVersion == 'SNAPSHOT':
					tVersion = deploy_version.version(0, 0, 0)
				else:
					tVersion = deploy_version.version(strVersion)
				atVersions.append(tVersion)

		# Sort the versions.
		atVersions.sort()

		return atVersions
コード例 #4
0
	def findSha1Artifacts(self, strFileName, strGroupID, strArtifactID):
		"""Return versions of group/artifact whose SHA1 matches *strFileName*.

		Computes the file's SHA1, runs the server's SHA1 search and
		collects the versions of hits whose group and artifact IDs match,
		sorted ascending.  Raises ``Exception`` on a truncated result.
		"""
		atVersions = []

		# Generate the SHA1 sum for the file.
		strFileSha1 = self.generate_sha1_from_file(strFileName)

		strPath = self.strUrlLuceneSearchSha1 % strFileSha1
		aucContent = self.tRestDriver.get(self.tServerBaseUrl, strPath)
		tSearchResult = XML(aucContent)

		# The search result must be complete.
		if tSearchResult.findtext('tooManyResults') != 'false':
			raise Exception("Received a truncated search result!")

		# Loop over all results.
		for tNode in tSearchResult.findall('data/artifact'):
			strG = tNode.findtext('groupId')
			strA = tNode.findtext('artifactId')
			strVersion = tNode.findtext('version')

			# IDIOM FIX: dropped the redundant '==True' comparisons.
			if isinstance(strG, basestring) and isinstance(strA, basestring) and isinstance(strVersion, basestring):
				strG = strG.strip()
				strA = strA.strip()
				strVersion = strVersion.strip()
				if strGroupID == strG and strArtifactID == strA:
					# 'SNAPSHOT' carries no numeric version; map to 0.0.0.
					if strVersion == 'SNAPSHOT':
						tVersion = deploy_version.version(0, 0, 0)
					else:
						tVersion = deploy_version.version(strVersion)
					atVersions.append(tVersion)

		atVersions.sort()

		return atVersions
コード例 #5
0
    def getAllArtifactVersions(self, strGroupID, strArtifactID):
        """Return all known versions of groupId/artifactId, sorted ascending.

        Queries the repository server's Lucene GA search and converts each
        hit's version text into a ``deploy_version.version`` object.
        Raises ``Exception`` if the server truncated the search result.
        """
        atVersions = []

        strPath = self.strUrlLuceneSearchGA % (strGroupID, strArtifactID)
        aucContent = self.tRestDriver.get(self.tServerBaseUrl, strPath)
        tSearchResult = XML(aucContent)

        # The search result must be complete.
        if tSearchResult.findtext('tooManyResults') != 'false':
            raise Exception("Received a truncated search result!")

        # Loop over all results.
        for tNode in tSearchResult.findall('data/artifact'):
            strVersion = tNode.findtext('version')
            # IDIOM FIX: use isinstance directly instead of '== True'.
            if isinstance(strVersion, basestring):
                strVersion = strVersion.strip()
                # 'SNAPSHOT' carries no numeric version; map it to 0.0.0.
                if strVersion == 'SNAPSHOT':
                    tVersion = deploy_version.version(0, 0, 0)
                else:
                    tVersion = deploy_version.version(strVersion)
                atVersions.append(tVersion)

        # Sort the versions.
        atVersions.sort()

        return atVersions
コード例 #6
0
def svn_log(pkgbase, repo):
    '''Retrieve the most recent SVN log entry for the given pkgbase and
    repository. The configured setting SVN_BASE_URL is used along with the
    svn_root for each repository to form the correct URL.'''
    trunk_url = '%s%s/%s/trunk/' % (settings.SVN_BASE_URL, repo.svn_root, pkgbase)
    output = subprocess.check_output(
        ['svn', 'log', '--limit=1', '--xml', trunk_url])
    # --limit=1 guarantees the XML carries exactly one <logentry> element.
    entry_doc = XML(output)
    latest = entry_doc.find('logentry')
    when = datetime.strptime(entry_doc.findtext('logentry/date'),
                             '%Y-%m-%dT%H:%M:%S.%fZ')
    return {
        'revision': int(latest.get('revision')),
        'date': when,
        'author': entry_doc.findtext('logentry/author'),
        'message': entry_doc.findtext('logentry/msg'),
    }
コード例 #7
0
ファイル: populate_signoffs.py プロジェクト: brain0/archweb
def svn_log(pkgbase, repo):
    '''Retrieve the most recent SVN log entry for the given pkgbase and
    repository. The configured setting SVN_BASE_URL is used along with the
    svn_root for each repository to form the correct URL.'''
    url = '{0}{1}/{2}/trunk/'.format(
            settings.SVN_BASE_URL, repo.svn_root, pkgbase)
    # --limit=1 means the returned XML holds a single <logentry>.
    doc = XML(subprocess.check_output(
            ['svn', 'log', '--limit=1', '--xml', url]))
    result = {
        'revision': int(doc.find('logentry').get('revision')),
        'date': datetime.strptime(doc.findtext('logentry/date'),
                '%Y-%m-%dT%H:%M:%S.%fZ'),
        'author': doc.findtext('logentry/author'),
        'message': doc.findtext('logentry/msg'),
    }
    return result
コード例 #8
0
def download(config):
    """Collect Hudson build metadata for *config*.

    Returns a dict with the test-job XML root, the build number, the
    Bio-Formats revision parsed from the artifact file names, and the
    OMERO changeset revision.
    """
    url = urllib.urlopen("%s%s" % (URLs[config], API_SUFFIX))
    hudson_xml = url.read()
    url.close()
    root = XML(hudson_xml)
    build_no = root.findtext("./number")
    bioformats_rev = "Unknown"
    for artifact in root.findall("./artifact"):
        file_name = artifact.findtext("./fileName")
        # BUG FIX: the '.' before 'jar' was unescaped and matched any
        # character; escape it so only real '.jar' file names match.
        match = re.match(r"bio-formats-r(\d+)\.jar", file_name)
        if match:
            bioformats_rev = match.group(1)
            break
    omero_rev = root.findtext("./changeSet/revision/revision")
    url = urllib.urlopen("%s%s%s" % (URLs[config], TEST_SUFFIX, API_SUFFIX))
    hudson_xml = url.read()
    url.close()
    return {"root": XML(hudson_xml), "build_no": build_no, "bioformats_rev": bioformats_rev, "omero_rev": omero_rev}
コード例 #9
0
def download(config):
    """Collect Hudson build metadata for *config*.

    Returns a dict with the test-job XML root, the build number, the
    Bio-Formats revision parsed from the artifact file names, and the
    OMERO changeset revision.
    """
    url = urllib.urlopen("%s%s" % (URLs[config], API_SUFFIX))
    hudson_xml = url.read()
    url.close()
    root = XML(hudson_xml)
    build_no = root.findtext("./number")
    bioformats_rev = 'Unknown'
    for artifact in root.findall("./artifact"):
        file_name = artifact.findtext("./fileName")
        # BUG FIX: the '.' before 'jar' was unescaped and matched any
        # character; escape it so only real '.jar' file names match.
        match = re.match(r'bio-formats-r(\d+)\.jar', file_name)
        if match:
            bioformats_rev = match.group(1)
            break
    omero_rev = root.findtext("./changeSet/revision/revision")
    url = urllib.urlopen("%s%s%s" % (URLs[config], TEST_SUFFIX, API_SUFFIX))
    hudson_xml = url.read()
    url.close()
    return { 'root': XML(hudson_xml), 'build_no': build_no,
             'bioformats_rev': bioformats_rev, 'omero_rev': omero_rev }
コード例 #10
0
    def findSha1Artifacts(self, strFileName, strGroupID, strArtifactID):
        """Return versions of group/artifact whose SHA1 matches *strFileName*.

        Computes the file's SHA1, runs the server's SHA1 search and
        collects the versions of hits whose group and artifact IDs match,
        sorted ascending.  Raises ``Exception`` on a truncated result.
        """
        atVersions = []

        # Generate the SHA1 sum for the file.
        strFileSha1 = self.generate_sha1_from_file(strFileName)

        strPath = self.strUrlLuceneSearchSha1 % strFileSha1
        aucContent = self.tRestDriver.get(self.tServerBaseUrl, strPath)
        tSearchResult = XML(aucContent)

        # The search result must be complete.
        if tSearchResult.findtext('tooManyResults') != 'false':
            raise Exception("Received a truncated search result!")

        # Loop over all results.
        for tNode in tSearchResult.findall('data/artifact'):
            strG = tNode.findtext('groupId')
            strA = tNode.findtext('artifactId')
            strVersion = tNode.findtext('version')

            # IDIOM FIX: dropped the redundant '== True' comparisons.
            if isinstance(strG, basestring) and isinstance(
                    strA, basestring) and isinstance(
                        strVersion, basestring):
                strG = strG.strip()
                strA = strA.strip()
                strVersion = strVersion.strip()
                if strGroupID == strG and strArtifactID == strA:
                    # 'SNAPSHOT' carries no numeric version; map to 0.0.0.
                    if strVersion == 'SNAPSHOT':
                        tVersion = deploy_version.version(0, 0, 0)
                    else:
                        tVersion = deploy_version.version(strVersion)
                    atVersions.append(tVersion)

        atVersions.sort()

        return atVersions
コード例 #11
0
def main():
    """
    Main function to handle scripted usage of the module
    """
    # Parse command-line arguments
    args = parse_arguments(sys.argv[1:])

    # Validate that our args are safe to proceed
    validate_arguments(args)

    # Expand home directory alias in the credential file path
    args.aws_credential_file = os.path.expanduser(args.aws_credential_file)

    # Enable debug logging if set by user
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)

    # Set SSL verification; User prompt is a negative assertion so we apply the inverse
    # session.verify = not args.no_verify_ssl

    # Set proxies for request to AWS STS service
    # First Get local proxy settings via env variables
    proxies = urllib.request.getproxies()

    # Then, Determine if in GE LAN &
    # Check if https_proxy was set as a local environment variable
    logging.debug('Proxy settings found in environment: {}'.format(proxies))
    try:
        # If in the GE LAN and the https proxy is not set
        if (socket.gethostbyname('sts.amazonaws.com')
                == '223.255.255.255') and ('https' not in proxies):
            logging.warning(
                "GE LAN detected and proxy missing, adding default proxy information"
            )

            # Set https_proxy
            print('Setting https_proxy to http://am.proxy.ge.com:80/.')
            proxies = {'https': 'http://am.proxy.ge.com:80/', 'no': '.ge.com'}
    except OSError as e:
        print(e)

        if e.errno == 8:
            print("The network is not routable. Please fix and try again.")

        raise SystemExit(1)

    # Create a handler & opener with the proxies set above
    proxy_handler = urllib.request.ProxyHandler(proxies)
    opener = urllib.request.build_opener(proxy_handler)

    # Prompt for missing SAML credentials
    while not args.username:
        args.username = input('Username: '******'Password: '******'utf8')
    logging.debug('Decoded IDP response: {}'.format(response_text))

    # Extract the SAML assertion
    try:
        saml_xml = XML(response_text)
        assertion = saml_xml.find(".//*[@name='SAMLResponse']").attrib['value']
        logging.debug('Decoded assertion:\n{}'.format(
            base64.b64decode(assertion)))
    except:
        # TODO: Improve error checking/handling
        print('Response did not contain a valid SAML assertion')
        raise SystemExit(1)

    # Parse the returned assertion and extract the authorized roles
    aws_roles = []
    assertion_xml = XML(base64.b64decode(assertion))

    for saml2attributevalue in assertion_xml.findall(
            ".//*[@Name='https://aws.amazon.com/SAML/Attributes/Role']/"):
        logging.debug(saml2attributevalue.text)
        aws_role = {}
        aws_role['role_arn'], aws_role[
            'principal_arn'] = saml2attributevalue.text.split(',')
        aws_role['name'] = aws_role['role_arn'].split('::')[1]
        aws_roles.append(aws_role)
    logging.debug(aws_roles)

    # If we're in interactive mode, have the user select the role from a list of available roles
    while not args.role:
        print('\nPlease choose the AWS account role you would like to assume:')
        for index, aws_role in enumerate(aws_roles, 1):
            print('[{}]: {}'.format(index, aws_role['name']))
        selection = input('Selection: ')

        if selection.isdigit():
            if 1 > int(selection) > len(aws_roles):
                print('\nInvalid Selection\n')
                continue
            else:
                args.role = aws_roles[int(selection) - 1]['name']
        else:
            print('\nSelection must be an integer')

    # Find the role specified by the user
    found_roles = [r for r in aws_roles if r['name'] == args.role]
    if len(found_roles) != 1:
        print(
            'Role "{}" not found. Run program in interactive mode for a list of available roles.'
            .format(args.role))
        raise SystemExit(1)
    aws_role = found_roles[0]

    # Use the SAML assertion to get an AWS token from STS
    sts_request = {
        'Action': 'AssumeRoleWithSAML',
        'Version': '2011-06-15',
        'RoleArn': aws_role['role_arn'],
        'PrincipalArn': aws_role['principal_arn'],
        'SAMLAssertion': assertion,
    }
    sts_request_data = urllib.parse.urlencode(sts_request)
    logging.debug(
        'STS Authentication request body: {}'.format(sts_request_data))
    sts_request_data = sts_request_data.encode('ascii')

    # Submit the authentication request
    try:
        with opener.open('https://sts.amazonaws.com/',
                         sts_request_data) as response:
            sts_response = response.read().decode('utf8')
    except HTTPError as e:
        print(e)
        if e.code == 403:
            print(
                'Try using the --mfa flag and provide your RSA token when prompted for a password.'
            )
        else:
            print('Failed to assume role with SAML assertion')
        raise SystemExit(1)
    logging.debug('AWS STS Response: {}'.format(sts_response))

    # Parse the STS response
    sts_response = XML(sts_response)

    # Read in the existing config file
    config = configparser.RawConfigParser()
    config.read(args.aws_credential_file)

    # Create the requested profile if it doesn't exist
    if not config.has_section(args.profile):
        config.add_section(args.profile)

    # Update the profile
    config.set(args.profile, 'output', args.cli_output_format)
    config.set(args.profile, 'region', args.region)
    config.set(
        args.profile, 'aws_access_key_id',
        sts_response.findtext(
            './/{https://sts.amazonaws.com/doc/2011-06-15/}AccessKeyId'))
    config.set(
        args.profile, 'aws_secret_access_key',
        sts_response.findtext(
            './/{https://sts.amazonaws.com/doc/2011-06-15/}SecretAccessKey'))
    config.set(
        args.profile, 'aws_session_token',
        sts_response.findtext(
            './/{https://sts.amazonaws.com/doc/2011-06-15/}SessionToken'))

    # Write the updated config file
    try:
        configfile = open(args.aws_credential_file, 'w+')
    except FileNotFoundError:
        os.makedirs(os.path.dirname(args.aws_credential_file))
        configfile = open(args.aws_credential_file, 'w+')
    config.write(configfile)
    configfile.close()