Example #1
    def aFixityTest(self):

        self.log("Create a binary")
        headers = {
            'Content-type': 'image/jpeg',
        }
        with open(os.path.join(os.getcwd(), 'resources', 'basic_image.jpg'), 'rb') as fp:
            data = fp.read()
            r = self.do_post(self.getBaseUri(), headers=headers, body=data)
            self.assertEqual(201, r.status_code, 'Did not create binary')
            location = self.get_location(r)

        fixity_endpoint = location + "/" + TestConstants.FCR_FIXITY
        self.log("Get a fixity result")
        headers = {
            'Accept': TestConstants.JSONLD_MIMETYPE
        }
        r = self.do_get(fixity_endpoint, headers=headers)
        self.assertEqual(200, r.status_code, "Can't get the fixity result")
        body = r.content.decode('UTF-8').rstrip('\n')
        json_body = json.loads(body)
        fixity_id = pyjq.first('.[0]."http://www.loc.gov/premis/rdf/v1#hasFixity"| .[]?."@id"', json_body)
        fixity_result = pyjq.first('.[] | select(."@id" == "{0}") | '
                                   '."http://www.loc.gov/premis/rdf/v1#hasMessageDigest" | .[]?."@id"'.format(fixity_id),
                                   json_body)
        self.assertEqual(self.FIXITY_RESULT, fixity_result, "Fixity result was not a match for expected.")

        self.log("Passed")
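The two pyjq.first calls above navigate an expanded JSON-LD response: the first pulls the @id of the premis hasFixity node, the second selects that node and reads its message digest. A minimal standalone sketch of the same traversal, run against a hypothetical (made-up) response shape, could look like this:

import pyjq

# Hypothetical expanded JSON-LD body; only the keys the queries touch are included.
json_body = [
    {
        "@id": "http://localhost:8080/rest/binary/fcr:fixity",
        "http://www.loc.gov/premis/rdf/v1#hasFixity": [
            {"@id": "http://localhost:8080/rest/binary#fixity"}
        ]
    },
    {
        "@id": "http://localhost:8080/rest/binary#fixity",
        "http://www.loc.gov/premis/rdf/v1#hasMessageDigest": [
            {"@id": "urn:sha-512:abc123"}
        ]
    }
]

# Step 1: the @id of the fixity node hanging off the first resource.
fixity_id = pyjq.first(
    '.[0]."http://www.loc.gov/premis/rdf/v1#hasFixity" | .[]?."@id"', json_body)

# Step 2: select that node by @id and read its message digest @id.
fixity_result = pyjq.first(
    '.[] | select(."@id" == "{0}") | '
    '."http://www.loc.gov/premis/rdf/v1#hasMessageDigest" | .[]?."@id"'.format(fixity_id),
    json_body)

assert fixity_result == "urn:sha-512:abc123"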
Example #2
    def get(self, irc, msg, args, selector, data):
        """<selector> <json data>

        Uses the selector against the json data and returns the selected
        value.
        See http://stedolan.github.io/jq/tutorial/ to learn about the
        selector syntax."""
        irc.reply(pyjq.first(selector, json.loads(data)))
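The command simply parses the JSON argument and hands the selector to pyjq.first. A minimal sketch of the same operation outside the bot, with a made-up selector and payload, would be:

import json
import pyjq

data = '{"repo": {"name": "pyjq", "stars": 123}}'
selector = '.repo.name'

# Parse the JSON, apply the jq selector, and return the first selected value.
print(pyjq.first(selector, json.loads(data)))  # -> pyjq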
Example #3
    def get_transaction_provider(self):
        headers = {
            'Accept': TestConstants.JSONLD_MIMETYPE
        }
        r = self.do_get(self.getFedoraBase(), headers=headers)
        self.assertEqual(200, r.status_code, "Did not get expected response")
        body = r.content.decode('UTF-8')
        json_body = json.loads(body)
        tx_provider = pyjq.first('.[]."http://fedora.info/definitions/v4/repository#hasTransactionProvider" | .[]."@id"',
                                 json_body)
        return tx_provider
Example #4
    def _pyjq(self, w_query, w_json=None):
        """
        Runs a jq query against a JSON structure.
        """

        if w_json is None:
            q_json = self.last_response
        else:
            q_json = w_json

        return pyjq.first(w_query, q_json)
Example #5
def validate_project_response(content):
    return pyjq.first('.data.createProject.project', content)
Example #6
def validate_user_story_name_select(content):
    return pyjq.first('.data.userStoryByName', content)
Example #7
def validate_threat_model_query(content):
    return pyjq.first('.data.createOrUpdateThreatModel.threatModel', content)
Example #8
def validate_abuser_story(content):
    return pyjq.first('.data.createOrUpdateAbuserStory.abuserStory', content)
Example #9
def get_site_contact_type_id():
    response = requests.get(GWS_URL + '/contactTypes?code=SiteContact')
    response.raise_for_status()
    return pyjq.first('._embedded.contactTypes[0].id', json.loads(response.text))
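The GWS endpoint apparently returns a HAL-style body, so the jq path drills through _embedded. A sketch against a hypothetical payload (the real response surely carries more fields) looks like:

import pyjq

payload = {
    "_embedded": {
        "contactTypes": [
            {"id": 42, "code": "SiteContact"}
        ]
    }
}

# Same traversal as get_site_contact_type_id(): the id of the first embedded contact type.
assert pyjq.first('._embedded.contactTypes[0].id', payload) == 42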
Example #10
def step_impl(context):
    assert context.ushard_group is not None
    assert context.mysql_groups is not None
    assert len(context.mysql_groups) >= 2

    configs = api_get(context, context.version_3 + "/ushard_deploy/get_config",
                      {
                          "group_id": context.ushard_group["group_id"],
                      })

    id = generate_id()
    user1 = {
        "name": "user1_" + id,
        "password": "******" + id,
        "schema": "schema_" + id
    }
    id = generate_id()
    user2 = {
        "name": "user2_" + id,
        "password": "******" + id,
        "schema": "schema_" + id
    }

    context.ushard_users = {"user1": user1, "user2": user2}

    rule_id = "rule_" + generate_id()

    mysql_group_1 = get_mysql_group_brief(context,
                                          context.mysql_groups[0]["group_id"])
    mysql_group_2 = get_mysql_group_brief(context,
                                          context.mysql_groups[1]["group_id"])

    # prepare database in MySQL
    api_get(
        context, context.version_3 + "/helper/query_mysql", {
            "mysql_id": mysql_group_1["master_id"],
            "user": "******",
            "password": mysql_group_1["root_password"],
            "query": "create database {0}_db1".format(user1["schema"])
        })
    api_get(
        context, context.version_3 + "/helper/query_mysql", {
            "mysql_id": mysql_group_1["master_id"],
            "user": "******",
            "password": mysql_group_1["root_password"],
            "query": "create database {0}_db2".format(user1["schema"])
        })
    api_get(
        context, context.version_3 + "/helper/query_mysql", {
            "mysql_id": mysql_group_2["master_id"],
            "user": "******",
            "password": mysql_group_2["root_password"],
            "query": "create database {0}_db1".format(user2["schema"])
        })
    api_get(
        context, context.version_3 + "/helper/query_mysql", {
            "mysql_id": mysql_group_2["master_id"],
            "user": "******",
            "password": mysql_group_2["root_password"],
            "query": "create database {0}_db2".format(user2["schema"])
        })

    # update dble

    ## server.xml
    serverXmlStr = pyjq.first('.[] | select(.name == "server.xml") | .value',
                              configs)
    assert serverXmlStr is not None
    serverXml = read_xml(serverXmlStr)

    for user in [user1, user2]:
        serverXml.getroot().append(
            etree.fromstring("""
        <user name="{0}">
            <property name="password">{1}</property>
            <property name="readOnly">false</property>
            <property name="schemas">{2}</property>
        </user>""".format(user["name"], user["password"], user["schema"])))

    ## schema.xml
    schemaXmlStr = pyjq.first('.[] | select(.name == "schema.xml") | .value',
                              configs)
    assert schemaXmlStr is not None
    schemaXml = read_xml(schemaXmlStr)

    newSections = """
<newSections>
    <schema name="{1}" sqlMaxLimit="100">
        <table name="test" type="default" primaryKey="id" needAddLimit="true" rule="{0}" dataNode="{1}_dn1,{1}_dn2"></table>
    </schema>
    <schema name="{6}" sqlMaxLimit="100">
        <table name="test" type="default" primaryKey="id" needAddLimit="true" rule="{0}" dataNode="{6}_dn3,{6}_dn4"></table>
    </schema>
    <dataNode name="{1}_dn1" dataHost="{1}_dh1" database="{1}_db1"></dataNode>
    <dataNode name="{1}_dn2" dataHost="{1}_dh1" database="{1}_db2"></dataNode>
    <dataNode name="{6}_dn3" dataHost="{6}_dh2" database="{6}_db1"></dataNode>
    <dataNode name="{6}_dn4" dataHost="{6}_dh2" database="{6}_db2"></dataNode>
    <dataHost name="{1}_dh1" maxCon="100" minCon="10" balance="1" switchType="-1" slaveThreshold="-1">
        <heartbeat>show slave status</heartbeat>
        <writeHost host="{1}_M" url="{2}" user="******" password="******">
            <readHost host="{1}_S1" url="{3}" user="******" password="******"></readHost>
        </writeHost>
    </dataHost>
    <dataHost name="{6}_dh2" maxCon="100" minCon="10" balance="1" switchType="-1" slaveThreshold="-1">
        <heartbeat>show slave status</heartbeat>
        <writeHost host="{6}_M" url="{7}" user="******" password="******">
            <readHost host="{6}_S1" url="{8}" user="******" password="******"></readHost>
        </writeHost>
    </dataHost>
</newSections>""".format(
        rule_id, user1["schema"], mysql_group_1["master_addr"],
        mysql_group_1["slave_addr"], "root", mysql_group_1["root_password"],
        user2["schema"], mysql_group_2["master_addr"],
        mysql_group_2["slave_addr"], "root", mysql_group_2["root_password"])

    for item in etree.fromstring(newSections):
        schemaXml.getroot().append(item)

    ## rule.xml
    ruleXmlStr = pyjq.first('.[] | select(.name == "rule.xml") | .value',
                            configs)
    assert ruleXmlStr is not None
    ruleXml = read_xml(ruleXmlStr)

    ruleXml.getroot().insert(
        0,
        etree.fromstring("""
    <tableRule name="{0}">
        <rule>
            <columns>id</columns>
            <algorithm>{0}</algorithm>
        </rule>
    </tableRule>""".format(rule_id)))

    ruleXml.getroot().append(
        etree.fromstring("""
    <function name="{0}" class="Hash">
        <property name="partitionCount">2</property>
        <property name="partitionLength">1</property>
    </function>""".format(rule_id)))

    ## update config
    cacheServiceXmlStr = pyjq.first(
        '.[] | select(.name == "cacheservice.properties") | .value', configs)
    assert cacheServiceXmlStr is not None
    ehcacheXmlStr = pyjq.first('.[] | select(.name == "ehcache.xml") | .value',
                               configs)
    assert ehcacheXmlStr is not None

    config = [{
        "name": "rule.xml",
        "value": xml_to_str(ruleXml),
    }, {
        "name": "schema.xml",
        "value": xml_to_str(schemaXml),
    }, {
        "name": "server.xml",
        "value": xml_to_str(serverXml),
    }, {
        "name": "cacheservice.properties",
        "value": cacheServiceXmlStr,
    }, {
        "name": "ehcache.xml",
        "value": ehcacheXmlStr,
    }]

    api_request_post(
        context, context.version_3 + "/ushard_deploy/save_config", {
            "is_sync": True,
            "group_id": context.ushard_group["group_id"],
            "ushard_config": json.dumps(config)
        })
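The three pyjq.first calls in this step all use the same pattern: the get_config response is a list of {"name": ..., "value": ...} entries, one per dble config file, and select(.name == "...") picks a single file's content. A minimal sketch with a hypothetical configs list:

import pyjq

configs = [
    {"name": "server.xml", "value": "<server/>"},
    {"name": "schema.xml", "value": "<schema/>"},
    {"name": "rule.xml", "value": "<rule/>"},
]

# Pick one file's content by name, as the step does for server.xml, schema.xml and rule.xml.
server_xml = pyjq.first('.[] | select(.name == "server.xml") | .value', configs)
assert server_xml == "<server/>"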
Example #11
def step_imp(context, should_or_not, type):
    alert_info = get_all_alert_info(context)
    match = pyjq.first(
        '.|.receivers[]|.universe_configs[]|select(."type"=="{0}")'.format(
            type), alert_info)
    assert (match and should_or_not) or (not match and not should_or_not)
Example #12
def step_impl(context, should_or_not):
    resp = get_all_unresolved_alert_record(context)
    alert_record = pyjq.first(
        '.[]|select(.fingerprint=="{0}")'.format(context.fingerprint), resp)
    assert (alert_record and should_or_not) or (not alert_record
                                                and not should_or_not)
Example #13
def step_imp(context, code):
    resp = get_alert_record_in_order(context, 'timestamp')
    alert_info = pyjq.first('.[]|select(."code" == {0})'.format(code), resp)
    if alert_info is not None:
        return True
Example #14
    def test_first(self):
        self.assertEqual(
            pyjq.first('.[] | . + $foo', ['val1', 'val2'],
                       vars=dict(foo='bar')), 'val1bar')
Example #15
def get_conn(config, prefix=None, tojq=None, **kwargs):
    '''
    Returns a DB connection (with pymysql) configured from the given
    config dictionary.
    '''

    # Given a Config Dictionary with an optional prefix and tojq
    # Pull the Data Out and Connect to the Database

    logger = logging.getLogger("db_helper.py")

    if isinstance(config, dict) is False:
        raise TypeError("config is not a dictionary")

    dbconfig = None

    if tojq is not None and isinstance(tojq, str):

        try:
            dbconfig = pyjq.first(tojq, config)
        except Exception as jq_error:
            logger.error("Unable to find config at jq rule : {}".format(tojq))
            logger.info("Error : {}".format(jq_error))

            raise jq_error
        else:
            logger.debug("Successfully tojq'ed this configuration.")
    else:
        dbconfig = config

    if isinstance(dbconfig, dict) is False:
        logger.error("DBConfig isn't here!")
        raise TypeError("Problem reading Database Information")

    pymysql_args = {
        "host":
        dbconfig["{}dbhostname".format(prefix)],
        "port":
        int(dbconfig["{}dbport".format(prefix)]),
        "user":
        dbconfig["{}dbusername".format(prefix)],
        "password":
        dbconfig["{}dbpassword".format(prefix)],
        "database":
        dbconfig["{}dbdb".format(prefix)],
        "autocommit":
        dbconfig.get("{}autocommit".format(prefix), kwargs.get("ac_def",
                                                               True)),
        "charset":
        dbconfig.get("{}charset".format(prefix),
                     kwargs.get("char_def", "utf8mb4"))
    }

    if dbconfig.get("{}ssl", False) is True:
        pymysql_args["ssl"] = dict()

        ca = dbconfig.get("{}dbsslca".format(prefix), None)
        capath = dbconfig.get("{}dbsslcapath".format(prefix), None)
        cert = dbconfig.get("{}dbsslcert".format(prefix), None)
        key = dbconfig.get("{}dbsslkey".format(prefix), None)
        cipher = dbconfig.get("{}dbsslcipher".format(prefix), None)

        if ca is not None:
            pymysql_args["ssl"]["ca"] = ca
        if capath is not None:
            pymysql_args["ssl"]["capath"] = capath
        if cert is not None:
            pymysql_args["ssl"]["cert"] = cert
        if key is not None:
            pymysql_args["ssl"]["key"] = key
        if cipher is not None:
            pymysql_args["ssl"]["cipher"] = cipher

    try:

        db_conn = pymysql.connect(**pymysql_args)

        logger.debug("Connected to {user}@{host}:{port}/{database}".format(
            **pymysql_args))

    except Exception as connection_error:
        logger.warning(
            "Connection Failed to {user}@{host}:{port}/{database}".format(
                **pymysql_args))
        logger.debug("Error {}".format(connection_error))

        raise connection_error

    return db_conn
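Only the tojq step of get_conn can be shown in isolation (connecting requires a live MySQL server). A sketch of how a jq path selects the database section out of a larger, hypothetical config dictionary, assuming an empty prefix:

import pyjq

config = {
    "services": {
        "reporting": {
            "dbhostname": "db.example.internal",
            "dbport": "3306",
            "dbusername": "report",
            "dbpassword": "secret",
            "dbdb": "reports",
        }
    }
}

# With tojq='.services.reporting', get_conn would resolve dbconfig like this
# and then read keys such as '{}dbhostname'.format(prefix).
dbconfig = pyjq.first('.services.reporting', config)
assert dbconfig["dbhostname"] == "db.example.internal"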
Example #16
#!/usr/bin/env python3
import pyjq

golden_data = {"param_name": "PKG_TAG_NAME", "param_type": None}

data = dict(
    parameters=[
        dict(name="PKG_TAG_NAME", value="trunk"),
        dict(name="GIT_COMMIT", value="master"),
        dict(name="TRIGGERED_JOB", value="trunk-buildall"),
    ],
    id="2013-12-27_00-09-37",
    changeSet=dict(items=[], kind="git"),
)

d = pyjq.first('.parameters[] | {"param_name": .name, "param_type":.type}',
               data)

assert d == golden_data
Example #17
def get_silence_list_by_rule_name(context, rule_name):
    resp = api_get(context, context.version_3 + "/alert/list_silence",
                   {"rule_name": rule_name})
    rule = pyjq.first('.[]', resp)
    return rule
Example #18
def getObject(key, object):
    return pyjq.first(key, object)
Example #19
def test_first():
    assert pyjq.first(".[] | . + $foo", ["val1", "val2"],
                      vars=dict(foo="bar")) == "val1bar"
Example #20
def public(accounts, config):
    for account in accounts:
        # Get the data from the `prepare` command
        outputfilter = {
            'internal_edges': False,
            'read_replicas': False,
            'inter_rds_edges': False,
            'azs': False,
            'collapse_by_tag': None,
            'collapse_asgs': True,
            'mute': True
        }
        network = build_data_structure(account, config, outputfilter)

        # Look at all the edges for ones connected to the public Internet (0.0.0.0/0)
        for edge in pyjq.all(
                '.[].data|select(.type=="edge")|select(.source=="0.0.0.0/0")',
                network):

            # Find the node at the other end of this edge
            target = {'arn': edge['target'], 'account': account['name']}
            target_node = pyjq.first(
                '.[].data|select(.id=="{}")'.format(target['arn']), network,
                {})

            # Depending on the type of node, identify what the IP or hostname is
            if target_node['type'] == 'elb':
                target['type'] = 'elb'
                target['hostname'] = target_node['node_data']['DNSName']
            elif target_node['type'] == 'autoscaling':
                target['type'] = 'autoscaling'
                target['hostname'] = target_node['node_data'].get(
                    'PublicIpAddress', '')
                if target['hostname'] == '':
                    target['hostname'] = target_node['node_data'][
                        'PublicDnsName']
            elif target_node['type'] == 'rds':
                target['type'] = 'rds'
                target['hostname'] = target_node['node_data']['Endpoint'][
                    'Address']
            elif target_node['type'] == 'ec2':
                target['type'] = 'ec2'
                dns_name = target_node['node_data'].get('PublicDnsName', '')
                target['hostname'] = target_node['node_data'].get(
                    'PublicIpAddress', dns_name)
            else:
                print(
                    pyjq.first(
                        '.[].data|select(.id=="{}")|[.type, (.node_data|keys)]'
                        .format(target['arn']), network, {}))

            # Check if any protocol is allowed (indicated by IpProtocol == -1)
            ingress = pyjq.all('.[]', edge.get('node_data', {}))

            sg_group_allowing_all_protocols = pyjq.first(
                'select(.IpPermissions[]|.IpProtocol=="-1")|.GroupId', ingress,
                None)
            public_sgs = set()
            if sg_group_allowing_all_protocols is not None:
                log_warning(
                    'All protocols allowed access to {} due to {}'.format(
                        target, sg_group_allowing_all_protocols))
                range_string = '0-65535'
                public_sgs.add(sg_group_allowing_all_protocols)
            else:
                # from_port and to_port mean the beginning and end of a port range
                # We only care about TCP (6) and UDP (17)
                # For more info see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html
                port_ranges = []
                for sg in ingress:
                    for ip_permission in sg['IpPermissions']:
                        selection = 'select((.IpProtocol=="tcp") or (.IpProtocol=="udp")) | select(.IpRanges[].CidrIp=="0.0.0.0/0")'
                        port_ranges.extend(
                            pyjq.all(
                                '{}| [.FromPort,.ToPort]'.format(selection),
                                ip_permission))
                        public_sgs.add(sg['GroupId'])
                range_string = port_ranges_string(regroup_ranges(port_ranges))

            target['ports'] = range_string
            target['public_sgs'] = list(public_sgs)
            if target['ports'] == "":
                issue_msg = 'No ports open for tcp or udp (probably can only be pinged). Rules that are not tcp or udp: {} -- {}'
                log_warning(
                    issue_msg.format(
                        json.dumps(
                            pyjq.all(
                                '.[]|select((.IpProtocol!="tcp") and (.IpProtocol!="udp"))'
                                .format(selection), ingress)), account))
            print(json.dumps(target, indent=4, sort_keys=True))

        account = Account(None, account)
        for region_json in get_regions(account):
            region = Region(account, region_json)
            # Look for CloudFront
            if region.name == 'us-east-1':
                json_blob = query_aws(region.account,
                                      'cloudfront-list-distributions', region)

                for distribution in json_blob.get('DistributionList',
                                                  {}).get('Items', []):
                    if not distribution['Enabled']:
                        continue

                    target = {
                        'arn': distribution['ARN'],
                        'account': account.name
                    }
                    target['type'] = 'cloudfront'
                    target['hostname'] = distribution['DomainName']
                    target['ports'] = '80,443'

                    print(json.dumps(target, indent=4, sort_keys=True))

            # Look for API Gateway
            json_blob = query_aws(region.account, 'apigateway-get-rest-apis',
                                  region)
            if json_blob is not None:
                for api in json_blob.get('items', []):
                    target = {'arn': api['id'], 'account': account.name}
                    target['type'] = 'apigateway'
                    target[
                        'hostname'] = '{}.execute-api.{}.amazonaws.com'.format(
                            api['id'], region.name)
                    target['ports'] = '80,443'

                    print(json.dumps(target, indent=4, sort_keys=True))
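The port-range query in the loop above can be exercised on its own. A sketch against a hypothetical security-group IpPermission entry, shaped like the AWS describe-security-groups output:

import pyjq

ip_permission = {
    "IpProtocol": "tcp",
    "FromPort": 80,
    "ToPort": 443,
    "IpRanges": [{"CidrIp": "0.0.0.0/0"}],
}

selection = ('select((.IpProtocol=="tcp") or (.IpProtocol=="udp")) '
             '| select(.IpRanges[].CidrIp=="0.0.0.0/0")')

# Emit [FromPort, ToPort] only for tcp/udp rules open to 0.0.0.0/0.
assert pyjq.all('{}| [.FromPort,.ToPort]'.format(selection),
                ip_permission) == [[80, 443]]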
Example #21
def get_public_nodes(account, config, use_cache=False):
    # TODO Look for IPv6 also
    # TODO Look at more services from https://github.com/arkadiyt/aws_public_ips
    # TODO Integrate into something to more easily port scan and screenshot web services

    # Try reading from cache
    cache_file_path = 'account-data/{}/public_nodes.json'.format(
        account['name'])
    if use_cache:
        if os.path.isfile(cache_file_path):
            with open(cache_file_path) as f:
                return json.load(f), []

    # Get the data from the `prepare` command
    outputfilter = {
        'internal_edges': False,
        'read_replicas': False,
        'inter_rds_edges': False,
        'azs': False,
        'collapse_by_tag': None,
        'collapse_asgs': True,
        'mute': True
    }
    network = build_data_structure(account, config, outputfilter)

    public_nodes = []
    warnings = []

    # Look at all the edges for ones connected to the public Internet (0.0.0.0/0)
    for edge in pyjq.all(
            '.[].data|select(.type=="edge")|select(.source=="0.0.0.0/0")',
            network):

        # Find the node at the other end of this edge
        target = {'arn': edge['target'], 'account': account['name']}
        target_node = pyjq.first(
            '.[].data|select(.id=="{}")'.format(target['arn']), network, {})

        # Depending on the type of node, identify what the IP or hostname is
        if target_node['type'] == 'elb':
            target['type'] = 'elb'
            target['hostname'] = target_node['node_data']['DNSName']
        elif target_node['type'] == 'elbv2':
            target['type'] = 'elbv2'
            target['hostname'] = target_node['node_data']['DNSName']
        elif target_node['type'] == 'autoscaling':
            target['type'] = 'autoscaling'
            target['hostname'] = target_node['node_data'].get(
                'PublicIpAddress', '')
            if target['hostname'] == '':
                target['hostname'] = target_node['node_data']['PublicDnsName']
        elif target_node['type'] == 'rds':
            target['type'] = 'rds'
            target['hostname'] = target_node['node_data']['Endpoint'][
                'Address']
        elif target_node['type'] == 'ec2':
            target['type'] = 'ec2'
            dns_name = target_node['node_data'].get('PublicDnsName', '')
            target['hostname'] = target_node['node_data'].get(
                'PublicIpAddress', dns_name)
        elif target_node['type'] == 'ecs':
            target['type'] = 'ecs'
            target['hostname'] = ''
            for ip in target_node['node_data']['ips']:
                if is_public_ip(ip):
                    target['hostname'] = ip
        elif target_node['type'] == 'redshift':
            target['type'] = 'redshift'
            target['hostname'] = target_node['node_data'].get(
                'Endpoint', {}).get('Address', '')
        else:
            # Unknown node
            raise Exception('Unknown type: {}'.format(target_node['type']))

        # Check if any protocol is allowed (indicated by IpProtocol == -1)
        ingress = pyjq.all('.[]', edge.get('node_data', {}))

        sg_group_allowing_all_protocols = pyjq.first(
            'select(.IpPermissions[]|.IpProtocol=="-1")|.GroupId', ingress,
            None)
        public_sgs = {}
        if sg_group_allowing_all_protocols is not None:
            warnings.append(
                'All protocols allowed access to {} due to {}'.format(
                    target, sg_group_allowing_all_protocols))
            range_string = '0-65535'
            # I would need to redo this code in order to get the name of the security group
            public_sgs[sg_group_allowing_all_protocols] = {
                'public_ports': '0-65535'
            }
        else:
            # from_port and to_port mean the beginning and end of a port range
            # We only care about TCP (6) and UDP (17)
            # For more info see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html
            port_ranges = []
            for sg in ingress:
                sg_port_ranges = []
                for ip_permission in sg['IpPermissions']:
                    selection = 'select((.IpProtocol=="tcp") or (.IpProtocol=="udp")) | select(.IpRanges[].CidrIp=="0.0.0.0/0")'
                    sg_port_ranges.extend(
                        pyjq.all('{}| [.FromPort,.ToPort]'.format(selection),
                                 ip_permission))
                public_sgs[sg['GroupId']] = {
                    'GroupName':
                    sg['GroupName'],
                    'public_ports':
                    port_ranges_string(regroup_ranges(sg_port_ranges))
                }
                port_ranges.extend(sg_port_ranges)
            range_string = port_ranges_string(regroup_ranges(port_ranges))

            target['ports'] = range_string
            target['public_sgs'] = public_sgs
            if target['ports'] == "":
                issue_msg = 'No ports open for tcp or udp (probably can only be pinged). Rules that are not tcp or udp: {} -- {}'
                warnings.append(
                    issue_msg.format(
                        json.dumps(
                            pyjq.all(
                                '.[]|select((.IpProtocol!="tcp") and (.IpProtocol!="udp"))'
                                .format(selection), ingress)), account))
            public_nodes.append(target)

    # For the network diagram, if an ELB has availability across 3 subnets, I put one node in each subnet.
    # We don't care about that when we want to know what is public and it makes it confusing when you
    # see 3 resources with the same hostname, when you view your environment as only having one ELB.
    # This same issue exists for RDS.
    # Reduce these to single nodes.

    reduced_nodes = {}

    for node in public_nodes:
        reduced_nodes[node['hostname']] = node

    public_nodes = []
    for _, node in reduced_nodes.items():
        public_nodes.append(node)

    account = Account(None, account)
    for region_json in get_regions(account):
        region = Region(account, region_json)
        # Look for CloudFront
        if region.name == 'us-east-1':
            json_blob = query_aws(region.account,
                                  'cloudfront-list-distributions', region)

            for distribution in json_blob.get('DistributionList',
                                              {}).get('Items', []):
                if not distribution['Enabled']:
                    continue

                target = {'arn': distribution['ARN'], 'account': account.name}
                target['type'] = 'cloudfront'
                target['hostname'] = distribution['DomainName']
                target['ports'] = '80,443'

                public_nodes.append(target)

        # Look for API Gateway
        json_blob = query_aws(region.account, 'apigateway-get-rest-apis',
                              region)
        if json_blob is not None:
            for api in json_blob.get('items', []):
                target = {'arn': api['id'], 'account': account.name}
                target['type'] = 'apigateway'
                target['hostname'] = '{}.execute-api.{}.amazonaws.com'.format(
                    api['id'], region.name)
                target['ports'] = '80,443'

                public_nodes.append(target)

    # Write cache file
    with open(cache_file_path, 'w') as f:
        f.write(json.dumps(public_nodes, indent=4, sort_keys=True))

    return public_nodes, warnings
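The "-1" IpProtocol check relies on pyjq.first's third argument, the default returned when the query yields nothing. A per-security-group sketch with made-up group data (the code above feeds the whole ingress list, but the query itself is the same):

import pyjq

sg_open = {"GroupId": "sg-open", "IpPermissions": [{"IpProtocol": "-1"}]}
sg_web = {"GroupId": "sg-web",
          "IpPermissions": [{"IpProtocol": "tcp", "FromPort": 443, "ToPort": 443}]}

query = 'select(.IpPermissions[]|.IpProtocol=="-1")|.GroupId'

# "-1" means all traffic; when no rule matches, the supplied default (None) comes back,
# which is what the warning above keys on.
assert pyjq.first(query, sg_open, None) == "sg-open"
assert pyjq.first(query, sg_web, None) is None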
Example #22
def validate_vulnerability_response_id(content):
    return pyjq.first('.data.createVulnerability.vulnerability.id', content)
Example #23
def step_impl(context):
    resp = get_sippool_list(context)
    info = pyjq.first('.[]|select(.sip=="{0}")'.format(context.valid_sip),
                      resp)
    component_group_id = info["used_by"]
    assert component_group_id == context.component_group_id
Example #24
def validate_evidence_response(content):
    return pyjq.first('.data.createVulnerabilityEvidence.vulnEvidence.name',
                      content)
Example #25
def validate_repo_query(content):
    return pyjq.first('.data.repoByName', content)
Example #26
def validate_scan_response(content):
    return pyjq.first('.data.createScan.scan.name', content)
Example #27
def validate_test_case_query(content):
    return pyjq.first('.data.createOrUpdateTestCase.case.name', content)
Example #28
def validate_target_response(content):
    return pyjq.first('.data.createTarget.target.name', content)
Example #29
def validate_user_stories(content):
    return pyjq.first('.data.userStories', content)
Example #30
    def __k8s(self):
        kubectl_cmd = DockerCommand(
            self._cmd, self._context.opt['--docker-image-kubectl'],
            self._context.opt['--volume-from'], True)
        helm_cmd = DockerCommand(self._cmd,
                                 self._context.opt['--docker-image-helm'],
                                 self._context.opt['--volume-from'], True)

        # Use release name instead of the namespace name for release
        release = self.__getRelease().replace('/', '-')
        namespace = self.__getNamespace()
        host = self.__getHost()

        # Need to create default helm charts
        if self._context.opt['--create-default-helm']:
            # Check that the chart dir does not exist
            if os.path.isdir('%s/templates' %
                             self._context.opt['--deploy-spec-dir']):
                raise ValueError(
                    'Directory %s/templates already exists, while --deploy-spec-dir has been selected.'
                    % self._context.opt['--deploy-spec-dir'])
            elif os.path.isfile('%s/values.yaml' %
                                self._context.opt['--deploy-spec-dir']):
                raise ValueError(
                    'File %s/values.yaml already exists, while --deploy-spec-dir has been selected.'
                    % self._context.opt['--deploy-spec-dir'])
            elif os.path.isfile('%s/Chart.yaml' %
                                self._context.opt['--deploy-spec-dir']):
                raise ValueError(
                    'File %s/Chart.yaml already exists, while --deploy-spec-dir has been selected.'
                    % self._context.opt['--deploy-spec-dir'])
            else:
                os.makedirs('%s/templates' %
                            self._context.opt['--deploy-spec-dir'])
                self._cmd.run_command('cp -R /cdp/k8s/charts/* %s/' %
                                      self._context.opt['--deploy-spec-dir'])
                with open(
                        '%s/Chart.yaml' %
                        self._context.opt['--deploy-spec-dir'],
                        'w') as outfile:
                    data = dict(apiVersion='v1',
                                description='A Helm chart for Kubernetes',
                                name=os.environ['CI_PROJECT_NAME'],
                                version='0.1.0')
                    yaml.dump(data, outfile)

        final_deploy_spec_dir = '%s_final' % self._context.opt[
            '--deploy-spec-dir']
        final_template_deploy_spec_dir = '%s/templates' % final_deploy_spec_dir
        try:
            os.makedirs(final_template_deploy_spec_dir)
            shutil.copyfile(
                '%s/Chart.yaml' % self._context.opt['--deploy-spec-dir'],
                '%s/Chart.yaml' % final_deploy_spec_dir)
        except OSError as e:
            LOG.error(str(e))

        command = 'upgrade %s' % release
        command = '%s %s' % (command, final_deploy_spec_dir)
        command = '%s --timeout %s' % (command, self._context.opt['--timeout'])
        set_command = '--set namespace=%s' % namespace

        # Deprecated: we will detect whether tiller is available in our namespace or in kube-system
        if self._context.opt['--tiller-namespace']:
            command = '%s --tiller-namespace=%s' % (command, namespace)

        tiller_length = 0
        tiller_json = ''
        try:
            if not self._context.opt['--tiller-namespace']:
                tiller_json = ''.join(
                    kubectl_cmd.run(
                        'get pod --namespace %s -l name="tiller" -o json --ignore-not-found=false'
                        % (namespace)))
                tiller_length = len(
                    pyjq.first('.items[] | .metadata.labels.name',
                               json.loads(tiller_json)))
                command = '%s --tiller-namespace=%s' % (command, namespace)
        except Exception as e:
            # Not present
            LOG.verbose(str(e))
        # Need to create default helm charts
        if self._context.opt['--create-default-helm']:
            set_command = '%s --set service.internalPort=%s' % (
                set_command, self._context.opt['--internal-port'])

        if self._context.opt['--image-tag-latest']:
            tag = self.__getTagLatest()
            pullPolicy = 'Always'
        elif self._context.opt['--image-tag-sha1']:
            tag = self.__getTagSha1()
            pullPolicy = 'IfNotPresent'
        else:
            tag = self.__getTagBranchName()
            pullPolicy = 'Always'

        set_command = '%s --set ingress.host=%s' % (set_command, host)
        set_command = '%s --set ingress.subdomain=%s' % (
            set_command, os.getenv('CDP_DNS_SUBDOMAIN', None))
        set_command = '%s --set image.commit.sha=sha-%s' % (
            set_command, os.environ['CI_COMMIT_SHA'][:8])
        set_command = '%s --set image.registry=%s' % (set_command,
                                                      self._context.registry)
        set_command = '%s --set image.repository=%s' % (
            set_command, self._context.repository)
        set_command = '%s --set image.tag=%s' % (set_command, tag)
        set_command = '%s --set image.pullPolicy=%s' % (set_command,
                                                        pullPolicy)

        # Need to add secret file for docker registry
        if not self._context.opt['--use-aws-ecr'] and not self._context.opt[
                '--use-registry'] == 'aws-ecr':
            # Add secret (only if the secret does not already exist)
            self._cmd.run_command(
                'cp /cdp/k8s/secret/cdp-secret.yaml %s/templates/' %
                self._context.opt['--deploy-spec-dir'])
            set_command = '%s --set image.credentials.username=%s' % (
                set_command, self._context.registry_user_ro)
            set_command = '%s --set image.credentials.password=%s' % (
                set_command, self._context.registry_token_ro)
            set_command = '%s --set image.imagePullSecrets=cdp-%s-%s' % (
                set_command, self._context.registry.replace(':', '-'), release)

        if self._context.opt['--create-gitlab-secret']:
            if os.getenv('CI_ENVIRONMENT_NAME', None) is None:
                LOG.err(
                    'Can not use gitlab secret because environment is not defined in gitlab job.'
                )
            secretEnvPattern = 'CDP_SECRET_%s_' % os.getenv(
                'CI_ENVIRONMENT_NAME', None)
            secretFileCreated = False
            #LOG.info('Looking for environment variables starting with: %s' % secretEnvPattern)
            for envVar, envValue in dict(os.environ).items():
                if envVar.startswith(secretEnvPattern.upper(), 0):
                    if not secretFileCreated:
                        #LOG.info('Some secrets have been found! Generating a kubernetes secret file!')
                        # Get the secret template if there are envVars to transform into secrets
                        self._cmd.run_command(
                            'cp /cdp/k8s/secret/cdp-gitlab-secret.yaml %s/templates/'
                            % self._context.opt['--deploy-spec-dir'])
                        secretFileCreated = True
                    # For each envVar of the right environment we add a line to the secret
                    self._cmd.run_secret_command(
                        'echo "  %s: \'%s\'" >> %s/templates/cdp-gitlab-secret.yaml'
                        % (envVar[len(secretEnvPattern):], envValue,
                           self._context.opt['--deploy-spec-dir']))

        command = '%s --debug' % command
        command = '%s -i' % command
        command = '%s --namespace=%s' % (command, namespace)
        command = '%s --force' % command
        command = '%s --wait' % command
        command = '%s --atomic' % command

        now = datetime.datetime.utcnow()
        date_format = '%Y-%m-%dT%H%M%S'
        if self._context.opt['--delete-labels']:
            command = '%s --description deletionTimestamp=%sZ' % (
                command, (now + datetime.timedelta(
                    minutes=int(self._context.opt['--delete-labels']))
                          ).strftime(date_format))
        elif namespace[:53] == self.__getName(False)[:53]:
            command = '%s --description deletionTimestamp=%sZ' % (command, (
                now +
                datetime.timedelta(minutes=int(240))).strftime(date_format))

        # Template charts for secret
        tmp_templating_file = '%s/all_resources.tmp' % final_deploy_spec_dir
        template_command = 'template %s' % self._context.opt[
            '--deploy-spec-dir']
        template_command = '%s %s' % (template_command, set_command)

        if self._context.opt['--values']:
            valuesFiles = self._context.opt['--values'].strip().split(',')
            values = '--values %s/' % self._context.opt['--deploy-spec-dir'] + (
                ' --values %s/' %
                self._context.opt['--deploy-spec-dir']).join(valuesFiles)
            template_command = '%s %s' % (template_command, values)

        template_command = '%s --name=%s' % (template_command, release)
        template_command = '%s --namespace=%s' % (template_command, namespace)
        template_command = '%s > %s' % (template_command, tmp_templating_file)
        helm_cmd.run(template_command)

        image_pull_secret_value = 'cdp-%s-%s' % (self._context.registry,
                                                 release)
        image_pull_secret_value = image_pull_secret_value.replace(':', '-')
        with open(tmp_templating_file, 'r') as stream:
            docs = list(yaml.load_all(stream))
            final_docs = []
            for doc in docs:
                if doc is not None:
                    LOG.verbose(doc)
                    final_docs.append(doc)
                    # Manage Deployment and StatefulSet
                    if not self._context.opt['--use-aws-ecr'] and not self._context.opt[
                            '--use-registry'] == 'aws-ecr' and 'kind' in doc and 'spec' in doc and (
                                'template' in doc['spec']
                                or 'jobTemplate' in doc['spec']):
                        doc = CLIDriver.addImageSecret(
                            doc, image_pull_secret_value)
        with open('%s/all_resources.yaml' % final_template_deploy_spec_dir,
                  'w') as outfile:
            LOG.info(yaml.dump_all(final_docs))
            yaml.dump_all(final_docs, outfile)

        # Install or upgrade the environment
        helm_cmd.run(command)

        if self._context.opt['--delete-labels']:
            kubectl_cmd.run(
                'label namespace %s deletable=true creationTimestamp=%sZ deletionTimestamp=%sZ --namespace=%s --overwrite'
                % (namespace, now.strftime(date_format),
                   (now + datetime.timedelta(
                       minutes=int(self._context.opt['--delete-labels']))
                    ).strftime(date_format), namespace))
        elif not self._context.opt[
                '--delete-labels'] and namespace[:53] == self.__getName(
                    False)[:53]:
            kubectl_cmd.run(
                'label namespace %s deletable=true creationTimestamp=%sZ deletionTimestamp=%sZ --namespace=%s --overwrite'
                % (namespace, now.strftime(date_format),
                   (now + datetime.timedelta(minutes=int(240))
                    ).strftime(date_format), namespace))

        self.__update_environment()
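The tiller detection above boils down to one jq query over kubectl's JSON output. A sketch against a hypothetical `kubectl get pod ... -o json` result, trimmed to just the fields the query touches:

import json
import pyjq

tiller_json = json.dumps({
    "items": [
        {"metadata": {"labels": {"name": "tiller"}}}
    ]
})

# Returns "tiller" when a matching pod exists, so len(...) is non-zero and
# the --tiller-namespace flag gets appended to the helm command.
assert pyjq.first('.items[] | .metadata.labels.name',
                  json.loads(tiller_json)) == 'tiller'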
Example #31
def validate_user_story(content):
    return pyjq.first('.data.createOrUpdateUserStory.userStory.shortName',
                      content)
Example #32
    def run(self):
        consumer = Consumer(self.config)
        callbacks = collections.defaultdict(list)
        functions = self.functions

        def close():
            log.info('Closing consumer')
            consumer.close()

        atexit.register(close)

        while True:
            add, update, remove = functions.refresh()
            if add or update or remove:
                existing_topics = set(callbacks.keys())

                for f in add:
                    callbacks[functions.arguments(f).get('topic')].append(f)
                for f in update:
                    pass
                for f in remove:
                    callbacks[functions.arguments(f).get('topic')].remove(f)

                interested_topics = set(callbacks.keys())

                if existing_topics.symmetric_difference(interested_topics):
                    log.debug(f'Subscribing to {interested_topics}')
                    consumer.subscribe(list(interested_topics))

            log.debug('Before polling...')

            message = consumer.poll(timeout=functions.refresh_interval)

            log.debug('After polling...')

            if not message:
                log.debug('Empty message received')
            elif not message.error():

                log.debug('Key:' + str(message.key()) + ' Value:' +
                          str(message.value()))

                topic, key, value = message.topic(), \
                                    message.key(), \
                                    message.value()
                try:
                    key = message.key().decode('utf-8')
                except:
                    pass
                try:
                    value = json.loads(value)
                except:
                    pass
                for function in callbacks[topic]:
                    jq_filter = functions.arguments(function).get('filter')
                    try:
                        if jq_filter and not pyjq.first(jq_filter, value):
                            continue
                    except:
                        log.error(
                            f'Could not filter message value with {jq_filter}')

                    data = self.function_data(function, topic, key, value)

                    log.debug('Invoking a function with data:' + str(data))

                    functions.gateway.post(functions._gateway_base +
                                           f'/function/{function["name"]}',
                                           data=data)
                    log.debug('Function: ' + f'/function/{function["name"]}' +
                              ' Data:' + str(data))

                # if auto commit not enabled, manually commit the messages
                if os.getenv('ENABLE_AUTO_COMMIT', 'True').lower() != 'true':
                    current_msg = message
                    consumer.commit(message=current_msg, asynchronous=False)
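Each registered function can carry a jq filter, and the consumer only invokes the function when pyjq.first returns something truthy for the message value. A sketch with a made-up filter and messages:

import pyjq

jq_filter = '.event == "order_created"'

matching = {"event": "order_created", "order_id": 7}
other = {"event": "order_cancelled", "order_id": 7}

# The loop above skips the function when the filter is falsy for the value.
assert pyjq.first(jq_filter, matching) is True
assert not pyjq.first(jq_filter, other)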
Example #33
    def test_first(self):
        self.assertEqual(
            pyjq.first('.[] | . + $foo', ['val1', 'val2'], vars=dict(foo='bar')),
            'val1bar'
        )
Example #34
def step_impl(context):
    assert context.mysql_group
    mysql_group_id = context.mysql_group[0]["mysql_group_id"]
    instances = get_mysql_info_in_group(context, mysql_group_id)
    master_instance = pyjq.first('.[]|select(.mysql_instance_role=="STATUS_MYSQL_MASTER")', instances)
    context.mysql_instance = master_instance
Example #35
    def condition(context, flag):
        resp = api_post(context, context.version_3 + "/mysql/config/list", body)
        config = pyjq.first('.data[]', resp)
        if config["mysql_instance_option_current_value"] == option_value:
            return True
Example #36
    def _current(self):
        headers = {'Authorization': 'Bearer ' + self.config['token'], 'accept': 'application/json'}
        url = 'https://api.nature.global/1/appliances'
        res = requests.get(url, headers=headers)
        q = '.[] | select(.id=="{}") | .settings'.format(self.config['appliance'])
        return int(pyjq.first(q, res.json())['temp'])
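The query selects one appliance by its id and returns its settings block. A sketch against a hypothetical /1/appliances payload (the real Nature Remo response carries many more fields):

import pyjq

appliances = [
    {"id": "aircon-1", "settings": {"temp": "26", "mode": "cool"}},
    {"id": "tv-1", "settings": None},
]

q = '.[] | select(.id=="{}") | .settings'.format("aircon-1")
assert int(pyjq.first(q, appliances)['temp']) == 26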