Example #1
def edit_dbGaP_authentication_list(nih_username):

    nih_username = nih_username.upper()

    # 1. read the contents of the dbGaP authentication list
    storage_service = get_storage_resource()
    filename = settings.DBGAP_AUTHENTICATION_LIST_FILENAME
    bucket_name = settings.DBGAP_AUTHENTICATION_LIST_BUCKET
    req = storage_service.objects().get_media(bucket=bucket_name,
                                              object=filename)
    dbGaP_authorized_list = req.execute()
    rows = [row.strip() for row in dbGaP_authorized_list.split('\n') if row.strip()]

    # 2. remove the line with the offending nih_username.
    # note: this removes not only the user with the nih_username but also
    # everyone who is a downloader for that user. Building a new list avoids
    # the bug of mutating `rows` while iterating over it.
    rows = [row for row in rows if nih_username not in row]

    # 3. reinsert the new dbGaP authentication list
    new_authentication_list = "\n".join(rows)
    media = http.MediaIoBaseUpload(io.BytesIO(new_authentication_list), 'text/plain')
    req = storage_service.objects().insert(bucket=bucket_name,
                                           name=filename,
                                           media_body=media
                                           )
    req.execute()
    logger.info("NIH user {} removed from {}".format(
        nih_username, settings.DBGAP_AUTHENTICATION_LIST_FILENAME))
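Every example on this page obtains its client from get_storage_resource(). The helper's body is not shown in the source; a minimal sketch, assuming it wraps the google-api-python-client discovery interface with Application Default Credentials:

from googleapiclient import discovery
from oauth2client.client import GoogleCredentials

def get_storage_resource():
    # build a Cloud Storage JSON API (v1) client; ADC is an assumption here --
    # the real helper may use explicit service-account credentials instead
    credentials = GoogleCredentials.get_application_default()
    return discovery.build('storage', 'v1', credentials=credentials)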
Example #2
def get_nih_authorized_list(request):

    storage_service = get_storage_resource()
    filename = settings.DBGAP_AUTHENTICATION_LIST_FILENAME
    bucket_name = settings.DBGAP_AUTHENTICATION_LIST_BUCKET

    req = storage_service.objects().get_media(bucket=bucket_name,
                                              object=filename)
    contents = req.execute()
    scrub_nih_users(contents)

    return HttpResponse('')
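The scrub_nih_users() helper is not shown in the source. A hypothetical sketch of what it might do with the downloaded whitelist, assuming an NIH_User Django model like the one in the later examples:

def scrub_nih_users(contents):
    # hypothetical: collect the usernames present in the authorized list
    authorized = set()
    for row in contents.split('\n'):
        row = row.strip()
        if row:
            # assumption: the username is the first whitespace-delimited field
            authorized.add(row.split()[0].upper())
    # deactivate any active NIH_User rows that are no longer on the list
    for nih_user in NIH_User.objects.filter(active=1):
        if nih_user.NIH_username.upper() not in authorized:
            nih_user.active = 0
            nih_user.dbGaP_authorized = 0
            nih_user.save()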
Example #3
def load_billing_to_bigquery(request):
    """Main: Read the file from storage and load into BigQuery
    """
    env = os.getenv('SERVER_SOFTWARE', '')  # default to '' so startswith() below is safe when unset

    for num in range(36):
        load_date = (datetime.datetime.now() + datetime.timedelta(days=-num-1))

        # construct the service object for interacting with the Cloud Storage API
        if env.startswith('Google App Engine/'):
            service = get_storage_resource()
        else:
            service = get_special_storage_resource()

        print >> sys.stderr, '>< Load billing json from date: {}'.format(load_date.strftime("%Y-%m-%d"))
        logger.info('>< Load billing json from date: {}'.format(load_date.strftime("%Y-%m-%d")))

        # some params
        table_id = 'billing_' + load_date.strftime("%Y%m%d")
        file_to_load = 'billing-' + load_date.strftime("%Y-%m-%d") + '.json'
        file_to_upload = 'intermediary/' + file_to_load
        gcs_load_file = 'gs://' + GCS_BUCKET + '/' + file_to_upload

        # read the file from the google cloud storage
        file_info = read_file_from_gcs(service, GCS_BUCKET, file_to_load)

        # process the file - flatten the json, convert to new-line delimited
        try:
            upload_fh = preprocess_file(file_info)
        except TypeError, e:
            print >> sys.stderr, '\nBarfed on preprocess_file date: {}. Error: {}. File info: {}'\
                .format(load_date.strftime("%Y-%m-%d"), e, file_info)
            continue
        else:
            print >> sys.stderr, '\nSuccess! {}'.format(load_date.strftime("%Y-%m-%d"))

        # upload the processed file to google cloud storage
        upload_file_to_gcs(service, GCS_BUCKET, upload_fh, file_to_upload)
        upload_fh.close()  # release the in-memory buffer now that the upload is complete

        # load the uploaded file from the storage(new-line delimited) into bigquery
        # create a new table, replacing the contents
        print >> sys.stderr, '<> Loading file from storage into BigQuery'
        logger.info('<> Loading file from storage into BigQuery')
        load_data_from_csv.run(PROJECT_ID, BQ_DATASET, table_id, BILLING_SCHEMA,
                               gcs_load_file, 'NEWLINE_DELIMITED_JSON',
                               'WRITE_TRUNCATE')
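The preprocess_file() step above is what converts the billing export (a JSON array) into the newline-delimited JSON that BigQuery load jobs expect. Its body is not shown; a hypothetical sketch consistent with the TypeError the caller catches:

import io
import json

def preprocess_file(file_info):
    # hypothetical: json.loads raises TypeError when file_info is None,
    # which the calling loop above catches and skips
    items = json.loads(file_info)
    out = io.BytesIO()
    for item in items:
        out.write(json.dumps(item) + '\n')  # one JSON document per line
    out.seek(0)
    return out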
Example #4
def CloudSQL_logging(request):

    filenames = get_binary_log_filenames()
    yesterdays_binary_log_file = filenames[-2]
    logger.info("Yesterday's binary log file: " + str(yesterdays_binary_log_file))
    arglist = ['mysqlbinlog',
               '--read-from-remote-server',
               yesterdays_binary_log_file,
               '--host',
               settings.DATABASES['default']['HOST'],
               '--user',
               settings.DATABASES['default']['USER'],
               '--base64-output=DECODE-ROWS',
               '--verbose',
               '--password',
               settings.DATABASES['default']['PASSWORD'],
               '--ssl-ca=' + settings.DATABASES['default']['OPTIONS']['ssl']['ca'],
               '--ssl-cert=' + settings.DATABASES['default']['OPTIONS']['ssl']['cert'],
               '--ssl-key=' + settings.DATABASES['default']['OPTIONS']['ssl']['key']
               ]

    child = pexpect.spawn(' '.join(arglist))
    child.expect('Enter password:')
    child.sendline(settings.DATABASES['default']['PASSWORD'])
    i = child.expect(['Permission denied', 'Terminal type', '[#\$] '])
    if i == 2:
        output = child.read()
        date_start_char = output.find('#1')
        if date_start_char != -1:  # str.find() returns -1 when the marker is absent
            date_str = output[date_start_char+1:date_start_char+7]
        else:
            # fall back to yesterday's UTC date; timedelta handles month boundaries
            yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=1)
            date_str = yesterday.strftime('%y%m%d') + '?'
        storage_service = get_storage_resource()
        media = http.MediaIoBaseUpload(io.BytesIO(output), 'text/plain')
        filename = 'cloudsql_activity_log_' + date_str + '.txt'
        storage_service.objects().insert(bucket='isb-cgc_logs',
                                         name=filename,
                                         media_body=media,
                                         ).execute()
    else:
        logger.warn("Logs were not written to cloudstorage, i = " + str(i))
        return HttpResponse("Logs were not written to cloudstorage, i = " + str(i))

    return HttpResponse('')
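get_binary_log_filenames() is not shown in the source. A hypothetical sketch, assuming it queries MySQL through the default Django connection:

from django.db import connection

def get_binary_log_filenames():
    # SHOW BINARY LOGS returns (Log_name, File_size) rows, oldest first,
    # so filenames[-2] in the caller is yesterday's closed log file
    cursor = connection.cursor()
    cursor.execute('SHOW BINARY LOGS')
    return [row[0] for row in cursor.fetchall()]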
Example #5
def log_acls(request):
    """log acls"""
    client = get_storage_resource()
    all_projects = ['isb-cgc', 'isb-cgc-data-01', 'isb-cgc-data-02', 'isb-cgc-test']
    acls = {}
    defacls = {}
    # Iterate through projects and buckets and get acls
    for project in all_projects:
        for bucket in list_buckets(client, project):
            acl = get_bucket_acl(client, bucket['name'])
            defacl = get_bucket_defacl(client, bucket['name'])
            acls[bucket['name']] = acl
            defacls[bucket['name']] = defacl
    # write log entry
    write_log_entry('bucket_acls', acls)
    write_log_entry('bucket_defacls', defacls)

    return HttpResponse('')
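The list_buckets(), get_bucket_acl() and get_bucket_defacl() helpers are not shown. Hedged sketches against the Cloud Storage JSON API v1 client returned by get_storage_resource(); the pagination follows the API's nextPageToken convention, but these are not the project's originals:

def list_buckets(client, project):
    # page through every bucket in the project
    buckets = []
    req = client.buckets().list(project=project)
    while req is not None:
        resp = req.execute()
        buckets.extend(resp.get('items', []))
        req = client.buckets().list_next(req, resp)
    return buckets

def get_bucket_acl(client, bucket_name):
    return client.bucketAccessControls().list(bucket=bucket_name).execute().get('items', [])

def get_bucket_defacl(client, bucket_name):
    return client.defaultObjectAccessControls().list(bucket=bucket_name).execute().get('items', [])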
Example #6
    def from_google_cloud_storage(cls, bucket_name, filename):
        """
        Factory method for building a configuration class instance from a JSON file in the Google Cloud Storage.

        Args:
            bucket_name: bucket name.
            filename:    object name.

        Returns:
            File contents.
        """
        from google_helpers.storage_service import get_storage_resource
        logger.debug("{}.from_google_cloud_storage {} {}".format(
            type(cls), repr(bucket_name), repr(filename)))
        storage_service = get_storage_resource()
        req = storage_service.objects().get_media(bucket=bucket_name,
                                                  object=filename)
        json_file = req.execute()
        return cls.from_json_string(json_file)
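The factory above delegates to cls.from_json_string(), which is not shown. A minimal sketch of that counterpart; the Config class here is an illustrative stand-in, not the source's configuration class:

import json

class Config(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    @classmethod
    def from_json_string(cls, json_string):
        # parse the JSON document and hand its fields to the constructor
        return cls(**json.loads(json_string))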
Example #7
    def __init__(self):
        self.storage = storage_service.get_storage_resource()
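Two-line initializers like this cache one client per instance so every method reuses the same authorized service object. A sketch of how such a wrapper might continue (the class and method names are illustrative, not from the source):

class GcsClient(object):
    def __init__(self):
        self.storage = storage_service.get_storage_resource()

    def read_object(self, bucket_name, filename):
        # every call reuses the cached client built in __init__
        return self.storage.objects().get_media(
            bucket=bucket_name, object=filename).execute()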
Example #9
    def _table_to_gcs(self,
                      file_format,
                      dataset_and_table,
                      export_type,
                      table_job_id=None):

        bq_service = get_bigquery_service()

        result = {'status': None, 'message': None}

        # presence of a table_job_id means the export query was still running when this
        # method was called; give it another round of checks
        if table_job_id:
            job_is_done = bq_service.jobs().get(
                projectId=settings.BIGQUERY_PROJECT_ID,
                jobId=table_job_id).execute()
            retries = 0
            while (job_is_done and job_is_done['status']['state'] != 'DONE'
                   and retries < BQ_ATTEMPT_MAX):
                retries += 1
                sleep(1)
                job_is_done = bq_service.jobs().get(
                    projectId=settings.BIGQUERY_PROJECT_ID,
                    jobId=table_job_id).execute()

            if job_is_done and job_is_done['status']['state'] != 'DONE':
                logger.debug(str(job_is_done))
                msg = "Export of {} to gs://{}/{} did not complete in the time allowed".format(
                    export_type, self.bucket_path, self.file_name)
                logger.error("[ERROR] {}.".format(msg))
                result['status'] = 'error'
                result['message'] = msg + "--please contact the administrator."
                return result
            else:
                dataset_and_table = {
                    'dataset_id': job_is_done['configuration']['query']['destinationTable']['datasetId'],
                    'table_id': job_is_done['configuration']['query']['destinationTable']['tableId']
                }

        job_id = str(uuid4())

        export_config = {
            'jobReference': {
                'projectId': self.project_id,
                'jobId': job_id
            },
            'configuration': {
                'extract': {
                    'sourceTable': {
                        'projectId': self.project_id,
                        'datasetId': dataset_and_table['dataset_id'],
                        'tableId': dataset_and_table['table_id']
                    },
                    'destinationUris': ['gs://{}/{}'.format(self.bucket_path, self.file_name)],
                    'destinationFormat': file_format,
                    'compression': 'GZIP'
                }
            }
        }

        export_job = bq_service.jobs().insert(
            projectId=settings.BIGQUERY_PROJECT_ID,
            body=export_config).execute(num_retries=5)

        job_is_done = bq_service.jobs().get(
            projectId=settings.BIGQUERY_PROJECT_ID, jobId=job_id).execute()

        retries = 0

        while (job_is_done and job_is_done['status']['state'] != 'DONE'
               and retries < BQ_ATTEMPT_MAX):
            retries += 1
            sleep(1)
            job_is_done = bq_service.jobs().get(
                projectId=settings.BIGQUERY_PROJECT_ID,
                jobId=job_id).execute()

        logger.debug("[STATUS] extraction job_is_done: {}".format(
            str(job_is_done)))

        if job_is_done and job_is_done['status']['state'] == 'DONE':
            if 'status' in job_is_done and 'errors' in job_is_done['status']:
                msg = "Export of {} to GCS bucket {} was unsuccessful, reason: {}".format(
                    export_type, self.bucket,
                    job_is_done['status']['errors'][0]['message'])
                logger.error("[ERROR] {}".format(msg))
                result['status'] = 'error'
                result['message'] = "Unable to export {} to bucket {}--please contact the administrator.".format(
                    export_type, self.bucket)
            else:
                # Check the file
                exported_file = get_storage_resource().objects().get(
                    bucket=self.bucket_path, object=self.file_name).execute()
                if not exported_file:
                    msg = "Export file {}/{} not found".format(
                        self.bucket_path, self.file_name)
                    logger.error("[ERROR] ".format({msg}))
                    export_result = bq_service.jobs().get(
                        projectId=settings.BIGQUERY_PROJECT_ID,
                        jobId=job_id).execute()
                    if 'errors' in export_result:
                        logger.error('[ERROR] Errors seen: {}'.format(
                            export_result['errors'][0]['message']))
                    result['status'] = 'error'
                    result['message'] = "Unable to export {} to file {}/{}--please contact the administrator.".format(
                        export_type, self.bucket_path, self.file_name)
                else:
                    if int(exported_file['size']) > 0:
                        logger.info(
                            "[STATUS] Successfully exported {} into GCS file gs://{}/{}"
                            .format(export_type, self.bucket_path,
                                    self.file_name))
                        result['status'] = 'success'
                        result['message'] = "{}MB".format(
                            str(round(float(exported_file['size']) / 1000000, 2)))
                    else:
                        msg = "File gs://{}/{} created, but appears empty. Export of {} may not have succeeded".format(
                            export_type, self.bucket_path, self.file_name)
                        logger.warn("[WARNING] {}.".format(msg))
                        result['status'] = 'error'
                        result['message'] = msg + "--please contact the administrator."
        else:
            logger.debug(str(job_is_done))
            msg = "Export of {} to gs://{}/{} did not complete in the time allowed".format(
                export_type, self.bucket_path, self.file_name)
            logger.error("[ERROR] {}.".format(msg))
            result['status'] = 'error'
            result['message'] = msg + "--please contact the administrator."

        return result
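Both polling loops in this method share the same shape. One possible refactor (not in the source) that factors the wait into a single helper:

from time import sleep

def await_bq_job(bq_service, project_id, job_id, max_attempts, delay_seconds=1):
    # poll jobs().get() until the job reports DONE or the attempt budget runs out
    job = bq_service.jobs().get(projectId=project_id, jobId=job_id).execute()
    attempts = 0
    while job and job['status']['state'] != 'DONE' and attempts < max_attempts:
        attempts += 1
        sleep(delay_seconds)
        job = bq_service.jobs().get(projectId=project_id, jobId=job_id).execute()
    return job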
Example #10
def index(request):
    req = prepare_django_request(request)
    auth = init_saml_auth(req)
    errors = []
    not_auth_warn = False
    success_slo = False
    attributes = False
    paint_logout = False

    if 'sso' in req['get_data']:
        if 'redirect_url' in req['get_data']:
            return HttpResponseRedirect(auth.login(return_to=req['get_data']['redirect_url']))
        else:
            return HttpResponseRedirect(auth.login())
    elif 'sso2' in req['get_data']:
        return_to = OneLogin_Saml2_Utils.get_self_url(req) + reverse('attrs')
        return HttpResponseRedirect(auth.login(return_to))
    elif 'slo' in req['get_data']:
        name_id = None
        session_index = None
        if 'samlNameId' in request.session:
            name_id = request.session['samlNameId']
        if 'samlSessionIndex' in request.session:
            session_index = request.session['samlSessionIndex']

        # update record of user in table accounts_nih_user
        # so that active=0
        # and dbGaP_authorized=0

        return HttpResponseRedirect(auth.logout(name_id=name_id, session_index=session_index))
    elif 'acs' in req['get_data']:
        auth.process_response()
        errors = auth.get_errors()
        if errors:
            logger.info('executed auth.get_errors(). errors are:')
            logger.warn(errors)
            logger.info('error is because')
            logger.warn(auth.get_last_error_reason())

        not_auth_warn = not auth.is_authenticated()

        if not errors:
            request.session['samlUserdata'] = auth.get_attributes()
            request.session['samlNameId'] = auth.get_nameid()
            NIH_username = request.session['samlNameId']
            request.session['samlSessionIndex'] = auth.get_session_index()

            user_email = User.objects.get(id=request.user.id).email

            # check to see if user already has authenticated through
            # another NIH_username. If so, don't allow the same google email
            # to be linked to two different NIH usernames
            nih_usernames_already_linked_to_google_identity = NIH_User.objects.filter(user_id=request.user.id)
            for nih_user in nih_usernames_already_linked_to_google_identity:
                if nih_user.NIH_username != NIH_username:
                    logger.warn("User {} is already linked to the eRA commons identity {} and attempted authentication"
                                " with the eRA commons identity {}."
                                .format(user_email, nih_user.NIH_username, NIH_username))
                    messages.warning(request, "User {} is already linked to the eRA commons identity {}. "
                                           "Please unlink these before authenticating with the eRA commons identity {}."
                                     .format(user_email, nih_user.NIH_username, NIH_username))
                    return redirect('/users/' + str(request.user.id))

            # check if there is another google identity with the same NIH_username
            try:
                preexisting_nih_user = NIH_User.objects.get(NIH_username=NIH_username)
                if preexisting_nih_user.user_id != request.user.id:
                    logger.warn("User id {} tried to log into the NIH account {} that is already linked to user {}".format(
                        user_email,
                        NIH_username,
                        preexisting_nih_user.user_id
                    ))
                    messages.warning(request, "You tried to log into an NIH account that is linked to another google email address.")
                    return redirect('/users/' + str(request.user.id))

            except (ObjectDoesNotExist, MultipleObjectsReturned), e:
                # only redirect if there is a MultipleObjectsReturned error
                if type(e) is MultipleObjectsReturned:
                    logger.error("Error %s on NIH login: more than one NIH User with NIH_username %s" % (str(e), NIH_username))
                    return redirect('/users/' + str(request.user.id))

            storage_client = get_storage_resource()
            # check authenticated NIH username against NIH authentication list
            is_dbGaP_authorized = check_NIH_authorization_list(NIH_username, storage_client)

            saml_response = req['post_data'].get('SAMLResponse')
            if saml_response is not None:
                saml_response = saml_response.replace('\r\n', '')
            NIH_assertion_expiration = datetime.datetime.now() + datetime.timedelta(days=1)

            updated_values = {
                'NIH_assertion': saml_response,
                'NIH_assertion_expiration': pytz.utc.localize(NIH_assertion_expiration),
                'dbGaP_authorized': is_dbGaP_authorized,
                'user_id': request.user.id,
                'active': 1
            }

            nih_user, created = NIH_User.objects.update_or_create(NIH_username=NIH_username,
                                                                  user_id=request.user.id,
                                                                  defaults=updated_values)
            logger.info("NIH_User.objects.update_or_create() returned nih_user: {} and created: {}".format(
                str(nih_user.NIH_username), str(created)))

            # add or remove user from ACL_GOOGLE_GROUP if they are or are not dbGaP authorized
            directory_client, http_auth = get_directory_resource()
            # default warn message is for eRA Commons users who are not dbGaP authorized
            warn_message = '''
            WARNING NOTICE
            You are accessing a US Government web site which may contain information that must be protected under the US Privacy Act or other sensitive information and is intended for Government authorized use only.

            Unauthorized attempts to upload information, change information, or use of this web site may result in disciplinary action, civil, and/or criminal penalties. Unauthorized users of this website should have no expectation of privacy regarding any communications or data processed by this website.

            Anyone accessing this website expressly consents to monitoring of their actions and all communications or data transiting, stored on, or related to this website and is advised that if such monitoring reveals possible evidence of criminal activity, NIH may provide that evidence to law enforcement officials.
            '''

            if is_dbGaP_authorized:
                # if user is dbGaP authorized, warn message is different
                warn_message = 'You are reminded that when accessing controlled access information you are bound by the dbGaP TCGA DATA USE CERTIFICATION AGREEMENT (DUCA).' + warn_message
            try:
                result = directory_client.members().get(groupKey=ACL_GOOGLE_GROUP,
                                                        memberKey=user_email).execute(http=http_auth)
                # if the user is in the google group but isn't dbGaP authorized, delete member from group
                if len(result) and not is_dbGaP_authorized:
                    directory_client.members().delete(groupKey=ACL_GOOGLE_GROUP,
                                                      memberKey=user_email).execute(http=http_auth)
                    logger.warn("User {} was deleted from group {} because they don't have dbGaP authorization.".format(user_email, ACL_GOOGLE_GROUP))
            # if the user_email doesn't exist in the google group an HttpError will be thrown...
            except HttpError:
                # ...if the user is dbGaP authorized they should be added to the ACL_GOOGLE_GROUP
                if is_dbGaP_authorized:
                    body = {
                        "email": user_email,
                        "role": "MEMBER"
                    }
                    result = directory_client.members().insert(
                        groupKey=ACL_GOOGLE_GROUP,
                        body=body
                    ).execute(http=http_auth)
                    logger.info(result)
                    logger.info("User {} added to {}.".format(user_email, ACL_GOOGLE_GROUP))

            # Add task in queue to deactivate NIH_User entry after NIH_assertion_expiration has passed.
            try:
                task = Task(url=CHECK_NIH_USER_LOGIN_TASK_URI,
                            params={'user_id': request.user.id, 'deployment': CRON_MODULE},
                            countdown=COUNTDOWN_SECONDS)
                task.add(queue_name=LOGOUT_WORKER_TASKQUEUE)
                logger.info('enqueued check_login task for user {}, to run {} hours from now'.format(
                    request.user.id, COUNTDOWN_SECONDS / (60*60)))
            except Exception as e:
                logger.error("Failed to enqueue automatic logout task")
                logging.exception(e)

            messages.info(request, warn_message)
            return HttpResponseRedirect(auth.redirect_to('https://{}'.format(req['http_host'])))
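check_NIH_authorization_list() is not shown in the source. A hypothetical sketch that reuses the whitelist location from Example #1:

def check_NIH_authorization_list(NIH_username, storage_client):
    # hypothetical: download the dbGaP whitelist and test for the username
    req = storage_client.objects().get_media(
        bucket=settings.DBGAP_AUTHENTICATION_LIST_BUCKET,
        object=settings.DBGAP_AUTHENTICATION_LIST_FILENAME)
    contents = req.execute()
    return any(NIH_username.upper() in row for row in contents.split('\n'))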
Example #11
def index(request):
    req = prepare_django_request(request)
    auth = init_saml_auth(req)
    errors = []
    not_auth_warn = False
    success_slo = False
    attributes = False
    paint_logout = False

    if 'sso' in req['get_data']:
        if 'redirect_url' in req['get_data']:
            return HttpResponseRedirect(auth.login(return_to=req['get_data']['redirect_url']))
        else:
            return HttpResponseRedirect(auth.login())
    elif 'sso2' in req['get_data']:
        return_to = OneLogin_Saml2_Utils.get_self_url(req) + reverse('attrs')
        return HttpResponseRedirect(auth.login(return_to))
    elif 'slo' in req['get_data']:
        name_id = None
        session_index = None
        if 'samlNameId' in request.session:
            name_id = request.session['samlNameId']
        if 'samlSessionIndex' in request.session:
            session_index = request.session['samlSessionIndex']

        # update record of user in table accounts_nih_user
        # so that active=0
        # and dbGaP_authorized=0

        return HttpResponseRedirect(auth.logout(name_id=name_id, session_index=session_index))
    elif 'acs' in req['get_data']:
        auth.process_response()
        errors = auth.get_errors()
        if errors:
            logger.info('executed auth.get_errors(). errors are:')
            logger.warn(errors)
            logger.info('error is because')
            logger.warn(auth.get_last_error_reason())

        not_auth_warn = not auth.is_authenticated()

        if not errors:
            request.session['samlUserdata'] = auth.get_attributes()
            request.session['samlNameId'] = auth.get_nameid()
            NIH_username = request.session['samlNameId']
            request.session['samlSessionIndex'] = auth.get_session_index()

            user_email = User.objects.get(id=request.user.id).email

            # 1. check if this google identity is currently linked to other NIH usernames
            # note: the NIH username exclusion is case-insensitive so this will not return a false positive
            # e.g. if this google identity is linked to 'NIHUSERNAME1' but just authenticated with 'nihusername1',
            # it will still pass this test
            nih_usernames_already_linked_to_this_google_identity = NIH_User.objects.filter(
                user_id=request.user.id, linked=True).exclude(NIH_username__iexact=NIH_username)
            for nih_user in nih_usernames_already_linked_to_this_google_identity:
                if nih_user.NIH_username.lower() != NIH_username.lower():
                    logger.warn("User {} is already linked to the eRA commons identity {} and attempted authentication"
                                " with the eRA commons identity {}."
                                .format(user_email, nih_user.NIH_username, NIH_username))
                    messages.warning(request, "User {} is already linked to the eRA commons identity {}. "
                                              "Please unlink these before authenticating with the eRA commons "
                                              "identity {}.".format(user_email, nih_user.NIH_username, NIH_username))
                    return redirect('/users/' + str(request.user.id))

            # 2. check if there are other google identities that are still linked to this NIH_username
            # note: the NIH username match is case-insensitive so this will not return a false negative.
            # e.g. if a different google identity is linked to 'NIHUSERNAME1' and this google identity just authenticated with 'nihusername1',
            # this will fail the test and return to the /users/ url with a warning message
            preexisting_nih_users = NIH_User.objects.filter(
                NIH_username__iexact=NIH_username, linked=True).exclude(user_id=request.user.id)

            if len(preexisting_nih_users) > 0:
                preexisting_nih_user_user_ids = [preexisting_nih_user.user_id for preexisting_nih_user in preexisting_nih_users]
                prelinked_user_email_list = [user.email for user in User.objects.filter(id__in=preexisting_nih_user_user_ids)]
                prelinked_user_emails = ', '.join(prelinked_user_email_list)

                logger.warn("User {} tried to log into the NIH account {} that is already linked to user(s) {}".format(
                    user_email,
                    NIH_username,
                    prelinked_user_emails + '.'
                ))
                messages.warning(request, "You tried to log into an NIH account that is linked to another google email address.")
                return redirect('/users/' + str(request.user.id))

            storage_client = get_storage_resource()
            # check authenticated NIH username against NIH authentication list
            is_dbGaP_authorized = check_NIH_authorization_list(NIH_username, storage_client)

            saml_response = req['post_data'].get('SAMLResponse')
            if saml_response is not None:
                saml_response = saml_response.replace('\r\n', '')
            NIH_assertion_expiration = datetime.datetime.now() + datetime.timedelta(days=1)

            updated_values = {
                'NIH_assertion': saml_response,
                'NIH_assertion_expiration': pytz.utc.localize(NIH_assertion_expiration),
                'dbGaP_authorized': is_dbGaP_authorized,
                'user_id': request.user.id,
                'active': 1,
                'linked': True
            }

            nih_user, created = NIH_User.objects.update_or_create(NIH_username=NIH_username,
                                                                  user_id=request.user.id,
                                                                  defaults=updated_values)
            logger.info("NIH_User.objects.update_or_create() returned nih_user: {} and created: {}".format(
                str(nih_user.NIH_username), str(created)))

            # add or remove user from ACL_GOOGLE_GROUP if they are or are not dbGaP authorized
            directory_client, http_auth = get_directory_resource()
            # default warn message is for eRA Commons users who are not dbGaP authorized
            warn_message = '''
            WARNING NOTICE
            You are accessing a US Government web site which may contain information that must be protected under the US Privacy Act or other sensitive information and is intended for Government authorized use only.

            Unauthorized attempts to upload information, change information, or use of this web site may result in disciplinary action, civil, and/or criminal penalties. Unauthorized users of this website should have no expectation of privacy regarding any communications or data processed by this website.

            Anyone accessing this website expressly consents to monitoring of their actions and all communications or data transiting, stored on, or related to this website and is advised that if such monitoring reveals possible evidence of criminal activity, NIH may provide that evidence to law enforcement officials.
            '''

            if is_dbGaP_authorized:
                # if user is dbGaP authorized, warn message is different
                warn_message = 'You are reminded that when accessing controlled access information you are bound by the dbGaP TCGA DATA USE CERTIFICATION AGREEMENT (DUCA).' + warn_message
            try:
                result = directory_client.members().get(groupKey=ACL_GOOGLE_GROUP,
                                                        memberKey=user_email).execute(http=http_auth)
                # if the user is in the google group but isn't dbGaP authorized, delete member from group
                if len(result) and not is_dbGaP_authorized:
                    directory_client.members().delete(groupKey=ACL_GOOGLE_GROUP,
                                                      memberKey=user_email).execute(http=http_auth)
                    logger.warn("User {} was deleted from group {} because they don't have dbGaP authorization.".format(user_email, ACL_GOOGLE_GROUP))
            # if the user_email doesn't exist in the google group an HttpError will be thrown...
            except HttpError:
                # ...if the user is dbGaP authorized they should be added to the ACL_GOOGLE_GROUP
                if is_dbGaP_authorized:
                    body = {
                        "email": user_email,
                        "role": "MEMBER"
                    }
                    result = directory_client.members().insert(
                        groupKey=ACL_GOOGLE_GROUP,
                        body=body
                    ).execute(http=http_auth)
                    logger.info(result)
                    logger.info("User {} added to {}.".format(user_email, ACL_GOOGLE_GROUP))

            # Add task in queue to deactivate NIH_User entry after NIH_assertion_expiration has passed.
            try:
                task = Task(url=CHECK_NIH_USER_LOGIN_TASK_URI,
                            params={'user_id': request.user.id, 'deployment': CRON_MODULE},
                            countdown=COUNTDOWN_SECONDS)
                task.add(queue_name=LOGOUT_WORKER_TASKQUEUE)
                logger.info('enqueued check_login task for user {}, to run {} hours from now'.format(
                    request.user.id, COUNTDOWN_SECONDS / (60*60)))
            except Exception as e:
                logger.error("Failed to enqueue automatic logout task")
                logging.exception(e)

            messages.info(request, warn_message)
            return HttpResponseRedirect(auth.redirect_to('https://{}'.format(req['http_host'])))

    elif 'sls' in req['get_data']:
        dscb = lambda: request.session.flush()
        url = auth.process_slo(delete_session_cb=dscb)
        errors = auth.get_errors()
        if len(errors) == 0:
            if url is not None:
                return HttpResponseRedirect(url)
            else:
                success_slo = True

    if 'samlUserdata' in request.session:
        paint_logout = True
        if len(request.session['samlUserdata']) > 0:
            attributes = request.session['samlUserdata'].items()

    return render_to_response('demo/index.html',
                              {'errors': errors,
                               'not_auth_warn': not_auth_warn,
                               'success_slo': success_slo,
                               'attributes': attributes,
                               'paint_logout': paint_logout},
                              context_instance=RequestContext(request))
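prepare_django_request() is not shown in either example. A sketch following OneLogin's python-saml Django demo, which this view otherwise mirrors:

def prepare_django_request(request):
    # translate the Django request into the dict the OneLogin_Saml2_Auth
    # constructor expects; the field names follow the OneLogin demo
    return {
        'https': 'on' if request.is_secure() else 'off',
        'http_host': request.META['HTTP_HOST'],
        'script_name': request.META['PATH_INFO'],
        'server_port': request.META['SERVER_PORT'],
        'get_data': request.GET.copy(),
        'post_data': request.POST.copy(),
    }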