Example no. 1
def test_get_vault_data_cyberark(monkeypatch):
    monkeypatch.setattr('processor.connector.vault.config_value',
                        mock_config_value_cybeark)
    monkeypatch.setattr('processor.connector.vault.Popen', Popen)
    from processor.connector.vault import get_vault_data
    val = get_vault_data(None)
    assert val == 'secret'
    val = get_vault_data('abcd')
    assert val == 'secret'
Example no. 2
def test_get_vault_data(monkeypatch):
    monkeypatch.setattr('processor.connector.vault.config_value',
                        mock_config_value)
    monkeypatch.setattr('processor.connector.vault.get_vault_access_token',
                        mock_get_vault_access_token)
    monkeypatch.setattr('processor.connector.vault.get_keyvault_secret',
                        mock_get_keyvault_secret)
    from processor.connector.vault import get_vault_data
    val = get_vault_data(None)
    assert val is None
    val = get_vault_data('abcd')
    assert val == 'secret'
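The mock helpers patched in above are defined elsewhere in the test module. A minimal sketch of what they might look like, assuming get_vault_data resolves a secret through get_vault_access_token and get_keyvault_secret (the names follow the monkeypatch targets above; the return shapes are guesses, not the project's actual fixtures):

def mock_config_value(section, key, default=None):
    # Hypothetical: pretend the configuration always selects the Azure keyvault provider.
    return 'azure_keyvault'

def mock_get_vault_access_token():
    # Hypothetical: return a dummy bearer token so the vault call can proceed.
    return 'dummy-token'

def mock_get_keyvault_secret(keyvault, secret_key, vault_token):
    # Hypothetical: mimic a keyvault response carrying a fixed secret value.
    return {'value': 'secret'}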
Example no. 3
def generate_gce(google_data, project, user):
    """
    Generate client secret json from the google data
    """
    logger.info("Generating GCE")
    gce = {
        "type": get_field_value(user, "type"),
        "project_id": get_field_value(project, "project-id"),
        "private_key_id": get_field_value(user, "private_key_id"),
        "private_key": get_field_value(user, "private_key"),
        "client_email": get_field_value(user, "client_email"),
        "client_id": get_field_value(user, "client_id"),
        "auth_uri": get_field_value(google_data, "auth_uri"),
        "token_uri": get_field_value(google_data, "token_uri"),
        "auth_provider_x509_cert_url": get_field_value(google_data, "auth_provider_x509_cert_url"),
        "client_x509_cert_url": get_field_value(user, "client_x509_cert_url"),
    }

    # Read the private key from the key path
    if not gce['private_key'] and get_field_value(user, "private_key_path"):
        private_key_path = get_field_value(user, "private_key_path")
        logger.info("Private key path : %s ", private_key_path)
        try:
            with open(private_key_path, 'r', encoding="utf-8") as key_file:
                gce['private_key'] = key_file.read().replace("\\n", "\n")
            if gce['private_key']:
                logger.info('Private key from Private key path, Secret: %s', '*' * len(gce['private_key']))
        except Exception as e:
            raise Exception("Unable to read the private key at the given private key path : %s " % str(e))

        if not gce['private_key']:
            raise Exception("Private key does not exist at the given private key path : %s " % private_key_path)
    
    # Read the private key from the vault
    if not gce['private_key']:
        private_key = get_vault_data(gce['private_key_id'])
        if private_key:
            gce["private_key"] = private_key.replace("\\n","\n")
            if gce["private_key"]:
                logger.info('Private key from vault Secret: %s', '*' * len(gce["private_key"]))
        elif get_from_currentdata(CUSTOMER):
            raise Exception("Private key does not set in a vault")
    
    if not gce['private_key']:
        raise Exception("No `private_key` field in the connector file to access the Google resource!...")

    if None in gce.values():
        raise Exception("Connector file does not contain valid values, or some fields required to access Google resources are missing.")

    return gce
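For orientation, a hypothetical shape of the google_data, project and user inputs that generate_gce reads through get_field_value (the keys mirror the lookups above; the values are placeholders, not real credentials):

# Placeholder inputs; keys mirror the get_field_value lookups in generate_gce.
google_data = {
    "auth_uri": "https://accounts.google.com/o/oauth2/auth",
    "token_uri": "https://oauth2.googleapis.com/token",
    "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
}
project = {"project-id": "my-gcp-project"}
user = {
    "type": "service_account",
    "private_key_id": "0123abcd",
    "private_key": None,                     # empty on purpose: exercises the key-path / vault branches
    "private_key_path": "/tmp/gce_key.pem",  # hypothetical fallback path
    "client_email": "svc@my-gcp-project.iam.gserviceaccount.com",
    "client_id": "1234567890",
    "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/svc",
}
gce = generate_gce(google_data, project, user)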
Example no. 4
def get_private_key(gce):
    """
    Fetches the private key needed to build the google service account credentials
    """
    if ('UAMI' not in os.environ or os.environ['UAMI'] != 'true'):
        # if private_key does not exist, this resolves to None
        gce["private_key"] = get_field_value(gce, 'private_key')

    if ('UAMI' in os.environ
            and os.environ['UAMI'] == 'true') or not gce["private_key"]:
        private_key = get_vault_data(gce['private_key_id'])
        if private_key:
            gce["private_key"] = private_key.replace("\\n", "\n")
        else:
            raise Exception("Private key does not set in a vault")

    return gce
Example no. 5
def generate_azure_vault_key():

    key = input("Enter the key to add or update its password: "******"Regenerating password for key: ", key)
    elif is_created:
        set_key_visbility(key, EDITABLE)
        print("Creating and generating password for key: ", key)
    else:
        print("Getting issue while generating key:", key)
Example no. 6
def get_client_secret(kubernetes_structure_data, snapshot_serviceAccount, snapshot_namespace):
    """
    get_client_secret takes the service account from the master snapshot and
    compares it with the service accounts defined in the kubernetes structure
    file, returning the secret of that service account if it exists in the
    structure file. It also checks environment variables if the service
    account secret does not exist in the structure file, using the name taken
    from the snapshot file. The function returns the secret as a string, which
    is used to connect to the kubernetes cluster.
    """
    global Cache_namespace, Cache_secret
    if snapshot_namespace == Cache_namespace:
        return Cache_secret

    namespaces = get_field_value(kubernetes_structure_data, 'namespaces')
    service_account_secret = ""
    for namespace in namespaces:
        service_accounts = get_field_value(namespace, 'serviceAccounts')
        for service_account in service_accounts:
            if snapshot_serviceAccount == service_account['name'] and namespace['namespace'] in snapshot_namespace:
                service_account_secret = get_field_value(service_account, 'secret')
                if service_account_secret is not None:
                    Cache_secret = service_account_secret
                    Cache_namespace = snapshot_namespace
                    return service_account_secret
                else:
                    service_account_secret = get_vault_data(service_account['id'])
                    if service_account_secret is not None:
                        Cache_secret = service_account_secret
                        Cache_namespace = snapshot_namespace

    if service_account_secret == "":
        logger.error("\t\t ERROR : cannot find secret for service account : %s" % (snapshot_serviceAccount))

    return service_account_secret
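A hypothetical fragment of the kubernetes structure data that get_client_secret walks (the keys mirror the lookups above; the values are placeholders):

# Placeholder structure data; layout mirrors the get_field_value lookups above.
kubernetes_structure_data = {
    'namespaces': [
        {
            'namespace': 'default',
            'serviceAccounts': [
                {
                    'name': 'reader-sa',
                    'secret': None,             # empty here, so the vault is queried by 'id'
                    'id': 'reader-sa-vault-id',
                },
            ],
        },
    ],
}
secret = get_client_secret(kubernetes_structure_data, 'reader-sa', 'default')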
Example no. 7
def git_clone_dir(connector):
    clonedir = None
    repopath = tempfile.mkdtemp()
    subdir = False
    git_cmd = None
    if connector and isinstance(connector, dict):
        giturl = get_field_value(connector, 'gitProvider')
        if not giturl:
            logger.error("Git connector does not have valid git provider URL")
            return repopath, clonedir
        
        branch = get_from_currentdata('branch')
        if not branch:
            branch = get_field_value_with_default(connector, 'branchName', 'master')

        isprivate = get_field_value(connector, 'private')
        isprivate = True if isprivate is None or not isinstance(isprivate, bool) else isprivate
        # logger.info("Repopath: %s", repopath)
        logger.info("\t\t\tRepopath: %s", repopath)
        http_match = re.match(r'^http(s)?://', giturl, re.I)
        if http_match:
            logger.info("\t\t\tHttp (private:%s) giturl: %s", "YES" if isprivate else "NO", giturl)

            accessToken = get_field_value(connector, 'httpsAccessToken')
            username = get_field_value(connector, 'httpsUser')
            if accessToken:
                logger.info("AccessToken: %s" % accessToken)
                pwd = get_field_value(connector, 'httpsPassword')
                pwd = pwd if pwd else get_git_pwd(key=accessToken)
                if not pwd:
                    pwd = get_pwd_from_vault(accessToken)
                    if pwd:
                        logger.info("Git access token from vault: %s", '*' * len(pwd))
                if pwd:
                    gh = GithubFunctions()
                    gh.set_access_token(pwd)
                    _ = gh.populate_user()
                    rpo = gh.clone_repo(giturl, repopath, branch)
                    if rpo:
                        logger.info('Successfully cloned in %s dir' % repopath)
                        checkdir = '%s/tmpclone' % repopath if subdir else repopath
                        clonedir = checkdir if exists_dir('%s/.git' % checkdir) else None
                        if not exists_dir(clonedir):
                            logger.error("No valid data provided for connect to git : %s", giturl)
                        return repopath, clonedir
                    elif isprivate:
                        logger.error("Please provide password for connect to git repository.")
                        return repopath, clonedir
                    else:
                        git_cmd = 'git clone %s %s' % (giturl, repopath)
            elif username:
                pwd = get_field_value(connector, 'httpsPassword')
                schema = giturl[:http_match.span()[-1]]
                other_part = giturl[http_match.span()[-1]:]
                # pwd = pwd if (pwd and not json_source()) else (get_git_pwd() if not json_source() else get_pwd_from_vault(pwd))
                pwd = pwd if pwd else get_git_pwd(key=username)

                # populate the password from vault
                if not pwd:
                    pwd = get_pwd_from_vault(username)
                    if pwd:
                        logger.info("Git password from vault: %s", '*' * len(pwd))
                if pwd:
                    git_cmd = 'git clone --depth 1 %s%s:%s@%s %s' % (schema, urllib.parse.quote_plus(username),
                                                        urllib.parse.quote_plus(pwd), other_part, repopath)
                elif isprivate:
                    logger.error("Please provide password for connect to git repository.")
                    return repopath, clonedir
                else:
                    git_cmd = 'git clone --depth 1 %s%s:%s@%s %s' % (schema, urllib.parse.quote_plus(username), "",
                                                     other_part, repopath)
            else:
                git_cmd = 'git clone --depth 1 %s %s' % (giturl, repopath)
        else:
            logger.info("SSH (private:%s) giturl: %s, Repopath: %s", "YES" if isprivate else "NO",
                        giturl, repopath)
            if isprivate:
                ssh_key_file = get_field_value(connector, 'sshKeyfile')
                ssh_key_name = get_field_value(connector, 'sshKeyName')
                ssh_key_file_data = None
                if ssh_key_file:
                    if not exists_file(ssh_key_file):
                        logger.error("Git connector points to a non-existent ssh keyfile!")
                        return repopath, clonedir
                elif ssh_key_name:
                    ssh_key_file_data = get_vault_data(ssh_key_name)
                    if not ssh_key_file_data:
                        logger.info('Git connector points to a non-existent ssh keyName in the vault!')
                        return repopath, clonedir
                ssh_host = get_field_value(connector, 'sshHost')
                ssh_user = get_field_value_with_default(connector, 'sshUser', 'git')
                if not ssh_host:
                    logger.error("SSH host not set, could be like github.com, gitlab.com, 192.168.1.45 etc")
                    return repopath, clonedir
                ssh_dir = '%s/.ssh' % repopath
                if exists_dir(ssh_dir):
                    logger.error("Git ssh dir: %s already exists, cannot recreate it!", ssh_dir)
                    return repopath, clonedir
                os.mkdir('%s/.ssh' % repopath, 0o700)
                if not ssh_key_file and ssh_key_name and ssh_key_file_data:
                    ssh_key_file = create_ssh_file_vault_data(ssh_dir, ssh_key_file_data, ssh_key_name)
                    if not ssh_key_file:
                        logger.info('Git connector points to a non-existent ssh keyName in the vault!')
                        return repopath, clonedir
                ssh_cfg = create_ssh_config(ssh_dir, ssh_key_file, ssh_user)
                if not ssh_cfg:
                    logger.error("Creation of Git ssh config in dir: %s failed!", ssh_dir)
                    return repopath, clonedir
                git_ssh_cmd = 'ssh -o "StrictHostKeyChecking=no" -F %s' % ssh_cfg
                git_cmd = 'git clone %s %s/tmpclone' % (giturl, repopath)
                subdir = True
            else:
                git_ssh_cmd = 'ssh -o "StrictHostKeyChecking=no"'
                git_cmd = 'git clone %s %s' % (giturl, repopath)
            os.environ['GIT_SSH_COMMAND'] = git_ssh_cmd
        git_cmd = '%s --branch %s' % (git_cmd, branch) if git_cmd else None
        if git_cmd:
            error_result, result = run_subprocess_cmd(git_cmd)
            checkdir = '%s/tmpclone' % repopath if subdir else repopath
            clonedir = checkdir if exists_dir('%s/.git' % checkdir) else None
            if not exists_dir(clonedir):
                logger.error("No valid data provided for connect to git : %s", error_result)
        if 'GIT_SSH_COMMAND' in os.environ:
            os.environ.pop('GIT_SSH_COMMAND')
    return repopath, clonedir
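A hypothetical HTTPS git connector accepted by git_clone_dir (the field names mirror the get_field_value lookups above; the URL and user are placeholders):

# Placeholder connector; keys mirror the lookups in git_clone_dir.
connector = {
    'gitProvider': 'https://github.com/example-org/example-repo.git',
    'branchName': 'master',
    'private': True,
    'httpsUser': 'example-user',
    'httpsPassword': None,   # left empty so get_git_pwd / get_pwd_from_vault are consulted
}
repopath, clonedir = git_clone_dir(connector)
if clonedir:
    print("Cloned into", clonedir)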
Example no. 8
def get_pwd_from_vault(password_key):
    """ Return the git password from vault """
    password = get_vault_data(password_key)
    if not password:
        logger.info("Password does not set in the vault")
    return password
Example no. 9
def populate_azure_snapshot(snapshot, container=None, snapshot_type='azure'):
    """ Populates the resources from azure."""
    dbname = config_value('MONGODB', 'dbname')
    snapshot_source = get_field_value(snapshot, 'source')
    snapshot_user = get_field_value(snapshot, 'testUser')
    snapshot_nodes = get_field_value(snapshot, 'nodes')
    snapshot_data, valid_snapshotids = validate_snapshot_nodes(snapshot_nodes)
    client_id, client_secret, sub_name, sub_id, tenant_id = \
        get_web_client_data(snapshot_type, snapshot_source, snapshot_user)
    if not client_id:
        # logger.info("No client_id in the snapshot to access azure resource!...")
        raise Exception("No client id in the snapshot to access azure resource!...")

    # Read the client secret from the environment variable
    if not client_secret:
        client_secret = os.getenv(snapshot_user, None)
        if client_secret:
            logger.info('Client Secret from environment variable, Secret: %s', '*' * len(client_secret))
        
    # Read the client secret from the vault
    if not client_secret:
        client_secret = get_vault_data(client_id)
        if client_secret:
            logger.info('Client Secret from Vault, Secret: %s', '*' * len(client_secret))
        elif get_from_currentdata(CUSTOMER):
            logger.error("Client Secret key is not set in the vault")
            raise Exception("Client Secret key is not set in the vault")

    if not client_secret:
        raise Exception("No `client_secret` key in the connector file to access azure resource!...")

    logger.info('\t\tSubscription: %s', sub_id)
    logger.info('\t\tTenant: %s', tenant_id)
    logger.info('\t\tclient: %s', client_id)
    put_in_currentdata('clientId', client_id)
    put_in_currentdata('clientSecret', client_secret)
    put_in_currentdata('subscriptionId', sub_id)
    put_in_currentdata('tenant_id', tenant_id)
    token = get_access_token()
    logger.debug('TOKEN: %s', token)
    if not token:
        logger.info("Unable to get access token, will not run tests....")
        raise Exception("Unable to get access token, will not run tests....")
        # return {}

    # snapshot_nodes = get_field_value(snapshot, 'nodes')
    # snapshot_data, valid_snapshotids = validate_snapshot_nodes(snapshot_nodes)
    if valid_snapshotids and token and snapshot_nodes:
        for node in snapshot_nodes:
            validate = node['validate'] if 'validate' in node else True
            if 'path' in  node:
                data = get_node(token, sub_name, sub_id, node, snapshot_user, snapshot_source)
                if data:
                    if validate:
                        if get_dbtests():
                            if get_collection_size(data['collection']) == 0:
                                # Creating indexes for collection
                                create_indexes(
                                    data['collection'], 
                                    config_value(DATABASE, DBNAME), 
                                    [
                                        ('snapshotId', pymongo.ASCENDING),
                                        ('timestamp', pymongo.DESCENDING)
                                    ]
                                )

                                create_indexes(
                                    data['collection'], 
                                    config_value(DATABASE, DBNAME), 
                                    [
                                        ('_id', pymongo.DESCENDING),
                                        ('timestamp', pymongo.DESCENDING),
                                        ('snapshotId', pymongo.ASCENDING)
                                    ]
                                )
                            insert_one_document(data, data['collection'], dbname, check_keys=False)
                        else:
                            snapshot_dir = make_snapshots_dir(container)
                            if snapshot_dir:
                                store_snapshot(snapshot_dir, data)
                        if 'masterSnapshotId' in node:
                            snapshot_data[node['snapshotId']] = node['masterSnapshotId']
                        else:
                            snapshot_data[node['snapshotId']] = True
                    # else:
                    #     snapshot_data[node['snapshotId']] = False
                    node['status'] = 'active'
                else:
                    # TODO alert if notification enabled or summary for inactive.
                    node['status'] = 'inactive'
                logger.debug('Type: %s', type(data))
            else:
                alldata = get_all_nodes(
                    token, sub_name, sub_id, node, snapshot_user, snapshot_source)
                if alldata:
                    snapshot_data[node['masterSnapshotId']] = []
                    for data in alldata:
                        # insert_one_document(data, data['collection'], dbname)
                        found_old_record = False
                        for masterSnapshotId, snapshot_list in snapshot_data.items():
                            old_record = None
                            if isinstance(snapshot_list, list):
                                for item in snapshot_list:
                                    if item["path"] == data['path']:
                                        old_record = item

                                if old_record:
                                    found_old_record = True
                                    if node['masterSnapshotId'] not in old_record['masterSnapshotId']:
                                        old_record['masterSnapshotId'].append(
                                            node['masterSnapshotId'])

                        if not found_old_record:
                            snapshot_data[node['masterSnapshotId']].append(
                                {
                                    'masterSnapshotId': [node['masterSnapshotId']],
                                    'snapshotId': data['snapshotId'],
                                    'path': data['path'],
                                    'validate': validate,
                                    'status': 'active'
                                })
                    # snapshot_data[node['masterSnapshotId']] = True
                logger.debug('Type: %s', type(alldata))
        delete_from_currentdata('resources')
        delete_from_currentdata('clientId')
        delete_from_currentdata('clientSecret')
        delete_from_currentdata('subscriptionId')
        delete_from_currentdata('tenant_id')
        delete_from_currentdata('token')
    return snapshot_data
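A hypothetical snapshot document of the shape populate_azure_snapshot expects (the field names mirror the lookups above; the ids and path are placeholders):

# Placeholder snapshot; keys mirror the get_field_value lookups above.
snapshot = {
    'source': 'azureConnector',
    'testUser': 'azure-test-user',
    'nodes': [
        {
            'snapshotId': 'AZURE_SNAPSHOT_1',
            'path': '/resourceGroups/demo-rg/providers/Microsoft.Compute/virtualMachines/demo-vm',
            'validate': True,
        },
    ],
}
snapshot_data = populate_azure_snapshot(snapshot, container='democontainer')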
Example no. 10
def populate_aws_snapshot(snapshot, container=None):
    """
    This is an entrypoint for populating a snapshot of type aws.
    All snapshot connectors should take snapshot object and based on
    'source' field create a method to connect to the service for the
    connector.
    The 'source' field could be used by more than one snapshot, so the
    'testUser' attribute should match the user of the 'source'.
    """
    dbname = config_value('MONGODB', 'dbname')
    snapshot_source = get_field_value(snapshot, 'source')
    snapshot_user = get_field_value(snapshot, 'testUser')
    account_id = get_field_value(snapshot, 'accountId')
    sub_data = get_aws_data(snapshot_source)
    snapshot_nodes = get_field_value(snapshot, 'nodes')
    snapshot_data, valid_snapshotids = validate_snapshot_nodes(snapshot_nodes)
    # valid_snapshotids = True
    # if snapshot_nodes:
    #     for node in snapshot_nodes:
    #         snapshot_data[node['snapshotId']] = False
    #         if not isinstance(node['snapshotId'], str):
    #             valid_snapshotids = False
    # if not valid_snapshotids:
    #     logger.error('All snap')
    if valid_snapshotids and sub_data and snapshot_nodes:
        logger.debug(sub_data)
        access_key, secret_access, region, connector_client_str = \
            get_aws_client_data(sub_data, snapshot_user, account_id)
        if not access_key:
            logger.info(
                "No access_key in the snapshot to access aws resource!...")
            raise Exception(
                "No access_key in the snapshot to access aws resource!...")
            # return snapshot_data

        # Read the client secret from the environment variable or standard input
        # if not secret_access and ('UAMI' not in os.environ or os.environ['UAMI'] != 'true'):
        #     secret_access = get_client_secret()
        #     logger.info('Environment variable or Standard input, Secret: %s', '*' * len(secret_access))

        # Read the client secret from the vault
        if not secret_access:
            secret_access = get_vault_data(access_key)
            if secret_access:
                logger.info('Vault Secret: %s', '*' * len(secret_access))
            else:
                logger.info("Secret Access key is not set in the vault")
                raise Exception("Secret Access key is not set in the vault")
        if not secret_access:
            logger.info(
                "No secret_access in the snapshot to access aws resource!...")
            return snapshot_data
        if access_key and secret_access:
            # existing_aws_client = {}
            for node in snapshot['nodes']:
                mastercode = False
                if 'snapshotId' in node:
                    client_str, aws_region = _get_aws_client_data_from_node(
                        node,
                        default_client=connector_client_str,
                        default_region=region)
                    if not _validate_client_name(client_str):
                        logger.error("Invalid Client Name")
                        return snapshot_data
                    try:
                        awsclient = client(client_str.lower(),
                                           aws_access_key_id=access_key,
                                           aws_secret_access_key=secret_access,
                                           region_name=aws_region)
                    except Exception as ex:
                        logger.info('Unable to create AWS client: %s', ex)
                        awsclient = None
                    logger.info(awsclient)
                    if awsclient:
                        data = get_node(awsclient, node, snapshot_source)
                        if data:
                            error_str = data.pop('error', None)
                            if get_dbtests():
                                if get_collection_size(
                                        data['collection']) == 0:
                                    #Creating indexes for collection
                                    create_indexes(
                                        data['collection'],
                                        config_value(DATABASE, DBNAME),
                                        [('snapshotId', pymongo.ASCENDING),
                                         ('timestamp', pymongo.DESCENDING)])
                                check_key = is_check_keys_required(data)
                                insert_one_document(data, data['collection'],
                                                    dbname, check_key)
                            else:
                                snapshot_dir = make_snapshots_dir(container)
                                if snapshot_dir:
                                    store_snapshot(snapshot_dir, data)
                            if 'masterSnapshotId' in node:
                                snapshot_data[node['snapshotId']] = node[
                                    'masterSnapshotId']
                            else:
                                snapshot_data[node[
                                    'snapshotId']] = False if error_str else True
                elif 'masterSnapshotId' in node:
                    mastercode = True
                    client_str, aws_region = _get_aws_client_data_from_node(
                        node,
                        default_client=connector_client_str,
                        default_region=region)
                    if not _validate_client_name(client_str):
                        logger.error("Invalid Client Name")
                        return snapshot_data
                    if aws_region:
                        all_regions = [aws_region]
                    else:
                        all_regions = Session().get_available_regions(
                            client_str.lower())
                        if client_str.lower() in ['s3', 'cloudtrail']:
                            all_regions = ['us-west-1']
                    logger.info("Length of all regions is %s" %
                                (str(len(all_regions))))
                    count = 0
                    snapshot_data[node['masterSnapshotId']] = []
                    for each_region in all_regions:
                        logger.info(each_region)
                        try:
                            awsclient = client(
                                client_str.lower(),
                                aws_access_key_id=access_key,
                                aws_secret_access_key=secret_access,
                                region_name=each_region)
                        except Exception as ex:
                            logger.info('Unable to create AWS client: %s', ex)
                        logger.info(awsclient)
                        if awsclient:
                            all_data = get_all_nodes(awsclient, node, snapshot,
                                                     sub_data)
                            if all_data:
                                for data in all_data:
                                    snapshot_data[node['masterSnapshotId']].append({
                                        'snapshotId': '%s%s' % (node['masterSnapshotId'], str(count)),
                                        'validate': True,
                                        'detailMethods': data['detailMethods'],
                                        'structure': 'aws',
                                        'masterSnapshotId': node['masterSnapshotId'],
                                        'collection': data['collection'],
                                        'arn': data['arn']
                                    })
                                    count += 1
            if mastercode:
                snapshot_data = eliminate_duplicate_snapshots(snapshot_data)
    return snapshot_data
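For the crawler path (a node carrying masterSnapshotId instead of path), a hypothetical input to populate_aws_snapshot (the field names mirror the lookups above; the ids and the client field are placeholders):

# Placeholder master snapshot; keys mirror the lookups in populate_aws_snapshot.
snapshot = {
    'source': 'awsConnector',
    'testUser': 'aws-test-user',
    'accountId': '123456789012',
    'nodes': [
        {
            'masterSnapshotId': 'AWS_EC2_MASTER_1',
            'type': 'ec2',   # hypothetical field from which _get_aws_client_data_from_node picks the boto3 client
        },
    ],
}
snapshot_data = populate_aws_snapshot(snapshot, container='democontainer')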
Example no. 11
def populate_snapshot_azure(snapshot_json, fssnapshot):
    """ Populates the resources from azure."""
    snapshot_data, valid_snapshotids = fssnapshot.validate_snapshot_ids_in_nodes(
        snapshot_json)
    client_id, client_secret, sub_name, sub_id, tenant_id, connector_type = get_web_client_data(
        snapshot_json, fssnapshot)

    if not client_id:
        logger.info(
            "No client_id in the snapshot to access azure resource!...")
        # raise Exception("No client id in the snapshot to access azure resource!...")
        raise SnapshotsException(
            "Container %s failure as no client id in the snapshot to access azure resource!..."
            % fssnapshot.container)

    # Read the client secret from the vault
    if not client_secret:
        client_secret = get_vault_data(client_id)
        if client_secret:
            logger.info('Client Secret from Vault, Secret: %s',
                        '*' * len(client_secret))
        elif fssnapshot.get_value(CUSTOMER):
            logger.error("Client Secret key is not set in the vault")
            raise SnapshotsException(
                "Client Secret key is not set in the vault")

    if not client_secret:
        raise SnapshotsException(
            "No `client_secret` key in the connector file to access azure resource!..."
        )

    logger.info('Sub:%s, tenant:%s, client: %s', sub_id, tenant_id, client_id)
    fssnapshot.store_value('clientId', client_id)
    fssnapshot.store_value('clientSecret', client_secret)
    fssnapshot.store_value('subscriptionId', sub_id)
    fssnapshot.store_value('tenant_id', tenant_id)
    token = get_access_token()
    logger.debug('TOKEN: %s', token)
    if not token:
        logger.info("Unable to get access token, will not run tests....")
        raise SnapshotsException(
            "Unable to get access token, will not run tests....")

    snapshot_source = get_field_value(snapshot_json, 'source')
    snapshot_user = get_field_value(snapshot_json, 'testUser')
    for node in fssnapshot.get_snapshot_nodes(snapshot_json):
        validate = node['validate'] if 'validate' in node else True
        if 'path' in node:
            data = get_snapshot_node(fssnapshot, token, sub_name, sub_id, node,
                                     snapshot_user, snapshot_source,
                                     connector_type)
            if data and validate:
                fssnapshot.store_data_node(data)
                snapshot_data[node['snapshotId']] = node[
                    'masterSnapshotId'] if 'masterSnapshotId' in node else True
                node['status'] = 'active'
            else:
                # TODO alert if notification enabled or summary for inactive.
                node['status'] = 'inactive'
            logger.debug('Type: %s', type(data))
        else:
            # Crawler Operation
            alldata = get_snapshot_nodes(fssnapshot, token, sub_name, sub_id,
                                         node, snapshot_user, snapshot_source,
                                         connector_type)
            if alldata:
                snapshot_data[node['masterSnapshotId']] = []
                for data in alldata:
                    found_old_record = False
                    for masterSnapshotId, snapshot_list in snapshot_data.items():
                        old_record = None
                        if isinstance(snapshot_list, list):
                            for item in snapshot_list:
                                if item["path"] == data['path']:
                                    old_record = item

                            if old_record:
                                found_old_record = True
                                if node['masterSnapshotId'] not in old_record[
                                        'masterSnapshotId']:
                                    old_record['masterSnapshotId'].append(
                                        node['masterSnapshotId'])

                    if not found_old_record:
                        snapshot_data[node['masterSnapshotId']].append({
                            'masterSnapshotId': [node['masterSnapshotId']],
                            'snapshotId': data['snapshotId'],
                            'path': data['path'],
                            'validate': validate,
                            'status': 'active'
                        })
            logger.debug('Type: %s', type(alldata))
    return snapshot_data