Code example #1
def send_invitation_email(user, validation_token, full_name):
    """
    First invitation email sent once a new trial has been created by a staff member.
    """
    msg = Message(subject="Twitter Channel Sign-up - Genesys Social Analytics",
                  sender=_get_sender(),
                  recipients=[user.email])
    msg.html = render_template("mail/invitation_email.html",
                               full_name=full_name,
                               url=validation_token.signup_url)
    # LOGGER.debug(msg.body)
    if get_app_mode() != 'dev':
        send_mail(msg, email_description='invitation email')
    else:
        LOGGER.info(msg.html)
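Every mailer in this file repeats the same guard: deliver the message unless the app runs in 'dev' mode, otherwise log the rendered HTML instead. A minimal sketch of how that shared pattern could be factored out (the helper name _send_or_log is hypothetical, not part of the project):

def _send_or_log(msg, email_description=None):
    # Deliver for real outside dev mode; in dev, just log the rendered HTML.
    if get_app_mode() != 'dev':
        if email_description is not None:
            send_mail(msg, email_description=email_description)
        else:
            send_mail(msg)
    else:
        LOGGER.info(msg.html)

With such a helper, the tail of each mailer below would reduce to a single call like _send_or_log(msg, 'invitation email').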
Code example #2
def send_validation_email(user, channel):
    """
    Sends a validation email once a twitter channel has been successfully configured through the signup wizard.
    """
    msg = Message(
        subject="Twitter Channel Confirmation - Genesys Social Analytics",
        sender=_get_sender(),
        recipients=[user.email])
    msg.html = render_template("mail/validation_email.html",
                               channel_name=channel.title,
                               url=get_var('HOST_DOMAIN'))
    # LOGGER.debug(msg.body)
    if get_app_mode() != 'dev':
        send_mail(msg, email_description='channel confirmation email')
    else:
        LOGGER.info(msg.html)
Code example #3
def send_new_channel_creation(staff_user, admin_user, channel, link):
    """
    Send a notification email to the staff user (and onboarding admins) once a user with a trial account creates a new channel
    """
    msg = Message(
        subject="A channel has been created for a trial account you created",
        sender=_get_sender(),
        recipients=[staff_user.email] + get_var('ONBOARDING_ADMIN_LIST'))
    # Pick the channel's twitter @handle from its usernames (last match wins)
    handles = channel.usernames
    thandle = 'Unknown'
    for handle in handles:
        if handle.startswith('@'):
            thandle = handle

    # Fall back to placeholder values for any fields that are missing
    staff_user_name = 'Unknown'
    admin_user_name = 'Unknown'
    admin_email = 'Unknown'
    ckeywords = 'None defined yet'
    cskipwords = 'None defined yet'

    if staff_user.first_name is not None:
        staff_user_name = staff_user.first_name
    if admin_user.first_name is not None:
        admin_user_name = admin_user.first_name
    if admin_user.email is not None:
        admin_email = admin_user.email
    if channel.keywords:
        ckeywords = ' '.join(channel.keywords)
    if channel.skipwords:
        cskipwords = ' '.join(channel.skipwords)
    msg.html = render_template("mail/new_channel_creation.html",
                               staff_user_name=staff_user_name,
                               admin_user_name=admin_user_name,
                               admin_user_email=admin_email,
                               keywords=ckeywords,
                               skipwords=cskipwords,
                               twitter_handle=thandle,
                               channel_link=link)
    if get_app_mode() != 'dev':
        send_mail(msg, email_description='new channel confirmation email')
    else:
        LOGGER.info(msg.html)
Code example #4
File: config.py  Project: princez1214/flask
    def load(self):
        try:
            import solariat_bottle.jobs_config as cfg
        except ImportError:
            LOGGER.error('Jobs configuration file "jobs_config.py" is not found.')
            raise

        self.kafka_broker = cfg.KAFKA_BROKER
        self.consumers_group = cfg.CONSUMERS_GROUP
        self.supported_topics = cfg.TOPICS
        self.cluster_config = cfg.CLUSTER_CONFIG
        if get_app_mode() == 'test':
            # default_transport = 'SERIAL'
            self.transport = 'serial'
        else:
            default_transport = 'kafka'
            self.transport = getattr(cfg, 'TRANSPORT', default_transport)
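The loader above expects a jobs_config.py module on the import path. A minimal example of what it must define, with purely illustrative values (the real project's settings are not shown here):

# solariat_bottle/jobs_config.py (illustrative values only)
KAFKA_BROKER = 'localhost:9092'
CONSUMERS_GROUP = 'jobs-consumers'
TOPICS = ['jobs']
CLUSTER_CONFIG = {}
# TRANSPORT is optional; load() falls back to 'kafka' outside test mode
TRANSPORT = 'kafka'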
Code example #5
def error_notifier_500(subject, raised_exception, system_info=None):
    """
    Email DEV_ADMIN_LIST about an unhandled exception in prod mode;
    outside prod, just log the exception and system info.
    """
    if get_app_mode() == 'prod':
        recipients = get_var('DEV_ADMIN_LIST')
        if not recipients:
            return
        msg = Message(subject=subject)
        msg.recipients = recipients
        msg.html = render_template("error_pages/500_email_notification.html",
                                   error_message=raised_exception,
                                   system_info=system_info)
        try:
            send_mail(msg)
        except EmailSendError:
            pass
    else:
        LOGGER.error("Got unhandled exception:")
        LOGGER.error(raised_exception)
        LOGGER.error("System info when it was raised:")
        LOGGER.error(system_info)
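A plausible way to invoke this notifier is from a Flask 500 error handler; the wiring below is an assumption about the surrounding app (the app object and subject text are illustrative):

import traceback

@app.errorhandler(500)
def handle_internal_error(exc):
    # forward the exception and a formatted traceback to the notifier
    error_notifier_500("Unhandled exception",
                       exc,
                       system_info=traceback.format_exc())
    return "Internal Server Error", 500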
Code example #6
def send_confimation_email(user, on_boarded_customer, keywords, usernames,
                           skipwords):
    """
    Send a confirmation email to a user once the signup process is completed
    """
    msg = Message(
        subject="Customer just signed up! - Genesys Social Analytics",
        sender=_get_sender(),
        recipients=[user.email] + get_var('ONBOARDING_ADMIN_LIST'))
    msg.html = render_template("mail/confirmation_email.html",
                               user_name=user.first_name,
                               customer_email=on_boarded_customer.email,
                               keywords=keywords,
                               usernames=usernames,
                               skipwords=skipwords)
    if get_app_mode() != 'dev':
        send_mail(msg, email_description='signup notification email')
    else:
        LOGGER.info(msg.html)
Code example #7
def send_account_posts_limit_warning(user, percentage, volume):
    """
    Sends a warning message to the Account admin when the posts limit for that account has reached a percentage
    :param user: The admin user or users
    :param percentage: The percentage surpassed
    :param volume: The volume allowed for this account
    """
    msg = Message(subject="Genesys Social Analytics Notification, " +
                  percentage + " of Permitted Traffic Volume Reached",
                  sender=_get_sender(),
                  recipients=[user.email])
    msg.html = render_template("mail/threshold_warning.html",
                               user_name=user.first_name or "Admin",
                               percentage=percentage,
                               volume=volume)

    if get_app_mode() != 'dev':
        send_mail(msg)
    else:
        LOGGER.info(msg.html)
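Since percentage is concatenated directly into the subject string, callers are expected to pass it as a string; a purely illustrative call:

# hypothetical objects/values, shown only to illustrate the string percentage
send_account_posts_limit_warning(admin_user, '75%', 100000)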
Code example #8
def send_user_create_notification(user):
    """
    Sends a notification to recently created user with a url of the app and a url to reset his password.

    :param user: The recently create user
    """
    url = user.signed_password_url()
    app_url = get_var('HOST_DOMAIN')
    msg = Message(subject="Your Genesys Social Analytics Login Has Been Setup",
                  sender=_get_sender(),
                  recipients=[user.email])
    msg.html = render_template("mail/user_creation_email.html",
                               url=url,
                               app_url=app_url)
    # LOGGER.debug(msg.body)
    if get_app_mode() != 'dev':
        send_mail(msg,
                  email_description='email notification to the created user')
    else:
        LOGGER.info(msg.html)
Code example #9
def send_tag_alert_emails(tag):
    """
    Sends emails to every tag.alert_email
    :param tag SmartTagChannel:
    :returns: True if sending process was successful, False otherwise
    """

    if not tag.alert_can_be_sent:
        return False

    # To prevent several processes from sending duplicate emails,
    # we set alert_last_sent_at to the current time up front;
    # it is restored if sending was unsuccessful.
    previous_sent_time = tag.alert_last_sent_at
    tag.alert_last_sent_at = datetime.now()
    tag.save()

    # noinspection PyBroadException
    try:
        tag_edit_url = '{}/configure#/tags/edit/{}'.format(
            get_var('HOST_DOMAIN'), str(tag.id))
        for user_email in tag.alert_emails:
            s = URLSafeSerializer(get_var('UNSUBSCRIBE_KEY'),
                                  get_var('UNSUBSCRIBE_SALT'))
            email_tag_id = s.dumps((user_email, str(tag.id)))
            tag_unsubscribe_url = '{}/unsubscribe/tag/{}'.format(
                get_var('HOST_DOMAIN'), email_tag_id)
            tag_view_url = '{}/inbound#?tag={}&channel={}'.format(
                get_var('HOST_DOMAIN'), str(tag.id), str(tag.parent_channel))
            msg = Message(subject="Geneys Social Analytics Alert - Smart Tag",
                          sender=_get_sender(),
                          recipients=[user_email])
            try:
                user = User.objects.get(email=user_email)
            except User.DoesNotExist:
                user = None
            msg.html = render_template(
                "mail/smarttag_alert.html",
                user_name=user.first_name if user else None,
                tag_title=tag.title,
                tag_edit_url=tag_edit_url,
                tag_unsubscribe_url=tag_unsubscribe_url,
                tag_view_url=tag_view_url)
            # also check get_var('ON_TEST'): when running tests,
            # get_app_mode() returns 'dev' in the pool_worker thread
            try:
                app_mode = get_app_mode()
            except RuntimeError:
                app_mode = 'dev'
            if app_mode != 'dev' or get_var('ON_TEST'):
                send_mail(msg)
            else:
                LOGGER.info(msg.html)
        tag.alert_last_sent_at = datetime.now()
        tag.save()
        return True
    except:
        if previous_sent_time:
            tag.alert_last_sent_at = previous_sent_time
            tag.save()
        return False
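The unsubscribe URL embeds a signed (email, tag id) pair produced by itsdangerous' URLSafeSerializer. A matching endpoint would decode it with the same key and salt; a minimal sketch (the helper below is hypothetical, not from the project):

from itsdangerous import URLSafeSerializer, BadSignature

def decode_tag_unsubscribe_token(token):
    # must use the same key/salt pair that produced the token above
    s = URLSafeSerializer(get_var('UNSUBSCRIBE_KEY'),
                          get_var('UNSUBSCRIBE_SALT'))
    try:
        user_email, tag_id = s.loads(token)
    except BadSignature:
        return None  # tampered or malformed token
    return user_email, tag_id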
Code example #10
File: classifiers.py  Project: princez1214/flask
    def retrain(self, predictor):
        model = self.predictor_model
        predictor.select_model(model)
        feature_size = predictor.features_space_size(model)
        test_size = float(model.train_data_percentage) / 100
        model.reset_performance_stats()
        new_state = model.state.try_change(action='train')
        from solariat_bottle.settings import get_app_mode
        if new_state is None and get_app_mode() != 'test':
            return

        batch_size = BATCH_SIZE

        max_entries = MAX_DATA_SIZE / feature_size
        max_entries -= max_entries % batch_size

        LOGGER.info("Training with a max size of " + str(max_entries))
        query = dict(predictor_id=predictor.id)
        # build a single date-range condition; assigning $gte and $lte
        # separately would overwrite one with the other when both dates are set
        date_range = {}
        if model.from_dt:
            date_range['$gte'] = model.from_dt
        if model.to_dt:
            date_range['$lte'] = model.to_dt
        if date_range:
            query[predictor.training_data_class.created_at.db_field] = date_range
        total_count = predictor.train_set_length
        if total_count <= 5000:
            test_size = -1

        test_samples = []
        local_test_samples = []  # pre-initialized; reassigned in the HYBRID/DISJOINT branch
        if self.model_type in (GLOBAL, HYBRID, DISJOINT):
            LOGGER.info("RETRAIN:: Training global model.")
            # Train the global model
            sample_rate = -1
            if total_count > max_entries:
                sample_rate = float(max_entries) / total_count
            context_list = []
            reward_list = []
            start_dt_loop = dt.now()
            n_batch_distinct_values = predictor.training_data_class.objects.coll.distinct(
                'nbch')
            n_batches = len(n_batch_distinct_values)
            LOGGER.info(
                "RETRAIN:: global model batches size: %s; total batches: %s" %
                (batch_size, n_batches))
            for i in xrange(len(n_batch_distinct_values)):
                LOGGER.info(
                    "RETRAIN: Global model. Reading batch %s of size %s; total distinct n_batches vals: %s"
                    % (i, 'UNKNOWN', len(n_batch_distinct_values)))
                start_dt = dt.now()
                n_batch_value = random.choice(n_batch_distinct_values)
                n_batch_distinct_values.remove(n_batch_value)
                n_batch_distinct_values_len = len(n_batch_distinct_values)
                query[predictor.training_data_class.n_batch.
                      db_field] = n_batch_value
                batch_contexts, batch_actions, batch_rewards = self.batched_read(
                    predictor,
                    query,
                    sample_rate,
                    batch_size=None,
                    batch_idx=None,
                    test_samples=test_samples,
                    test_split_size=test_size)
                LOGGER.info(
                    "RETRAIN: Global model. read n_batch: %s (%s), batch size: %s, timedelta: %s, accumulative timedelta: %s",
                    n_batch_value, n_batch_distinct_values_len,
                    len(batch_contexts),
                    dt.now() - start_dt,
                    dt.now() - start_dt_loop)
                context_list.extend(batch_contexts)
                reward_list.extend(batch_rewards)

                context_list_len = len(context_list)
                if context_list_len / (1.0 if self.model_type != HYBRID else
                                       2.0) >= total_count:
                    if new_state:
                        model.update(state=new_state,
                                     version=(model.version or 0) + 1)
                    model.n_rows = total_count
                predictor.save_progress(
                    model, context_list_len /
                    (1.0 if self.model_type != HYBRID else 2.0), total_count)

                LOGGER.info("RETRAIN: context_list len: %s", len(context_list))
                # if we reached max_entries limit lets cut the tail and break the loop
                if len(context_list) >= max_entries:
                    context_list = context_list[:max_entries]
                    reward_list = reward_list[:max_entries]
                    break
            LOGGER.info("RETRAIN:: Done with reading, Timedelta: %s.",
                        dt.now() - start_dt_loop)

            start_dt = dt.now()
            LOGGER.info("RETRAIN:: context_list len: %s, reward_list len: %s",
                        len(context_list), len(reward_list))
            LOGGER.info("Training global model now...")
            # sorting adds overhead, but it ensures that two global models
            # trained on the same data will be identical; sort the
            # context/reward pairs together so their pairing is preserved
            # (sorting each list independently would scramble it)
            if context_list:
                context_list, reward_list = map(list, zip(*sorted(
                    zip(context_list, reward_list))))
            self.fit_local_model(GLOBAL_KEY, context_list, reward_list)
            LOGGER.info("RETRAIN:: Trained global model; Timedelta: %s",
                        dt.now() - start_dt)

        query.pop(predictor.training_data_class.n_batch.db_field, None)
        if self.model_type in (HYBRID, DISJOINT):
            LOGGER.info("RETRAIN:: Training local models.")
            from collections import defaultdict
            actions = predictor.cardinalities.get(
                predictor.ACT_PREFIX + predictor.action_id_expression, [])
            local_test_samples = None if test_samples else []
            if total_count >= max_entries:
                ag_batch_size = 100  # 100 agents at a time as a mix between memory and performance
                ag_n_batches = len(actions) / ag_batch_size + 1
            else:
                ag_batch_size = total_count + 1
                ag_n_batches = 1
            start_dt_local_models = dt.now()
            for ag_batch_idx in xrange(ag_n_batches):
                start_dt_batch = dt.now()
                LOGGER.info(
                    "RETRAIN:: Training agents numbers %s of size %s (ag_n_batches: %s)",
                    str(ag_batch_idx), str(ag_batch_size), ag_n_batches)
                action_batch = actions[ag_batch_idx *
                                       ag_batch_size:(ag_batch_idx + 1) *
                                       ag_batch_size]
                query['act_id'] = {
                    '$in': [str(action_id) for action_id in action_batch]
                }
                total_count = predictor.train_set_length
                sample_rate = -1
                if total_count > max_entries:
                    sample_rate = float(max_entries) / total_count
                batch_size = BATCH_SIZE
                n_batches = total_count / batch_size + 1
                action_mappings = defaultdict(list)
                reward_mappings = defaultdict(list)

                start_dt_loop = dt.now()
                for batch_idx in xrange(n_batches):
                    start_dt_batch_iter = dt.now()
                    LOGGER.info(
                        "RETRAIN: Reading data batch nr %s (%s) of size %s",
                        batch_idx, n_batches, batch_size)
                    batch_contexts, batch_actions, batch_rewards = self.batched_read(
                        predictor, query, sample_rate, batch_size, batch_idx,
                        local_test_samples, test_size)
                    for idx in xrange(len(batch_actions)):
                        action_mappings[batch_actions[idx]].append(
                            batch_contexts[idx])
                        reward_mappings[batch_actions[idx]].append(
                            batch_rewards[idx])
                    LOGGER.info("RETRAIN: timedelta: %s",
                                dt.now() - start_dt_batch_iter)
                    LOGGER.info("RETRAIN: accumulative timedelta: %s",
                                dt.now() - start_dt_batch)
                LOGGER.info(
                    "RETRAIN:: Done reading batches for ag_batch_idx %s / %s batch size: %s; timedelta: %s",
                    ag_batch_idx, ag_n_batches, ag_batch_size,
                    dt.now() - start_dt_loop)

                action_mapping_len = len(action_mappings)
                start_dt_loop = dt.now()
                for i, action_id in enumerate(action_mappings.keys()):
                    if (action_id not in self._model_cache
                            and model.class_validity_check(
                                reward_mappings[action_id],
                                model.min_samples_thresould)):
                        self.add_local_model(
                            action_id,
                            self.get_model_instance(
                                **self.predictor_model.configuration))
                        LOGGER.info(
                            "RETRAIN: Fitting model for action: %s; %s out of %s",
                            str(action_id), i, action_mapping_len)
                        start_dt = dt.now()
                        self.fit_local_model(action_id,
                                             action_mappings[action_id],
                                             reward_mappings[action_id])
                        LOGGER.info(
                            "RETRAIN: Action fit_local_model() call: %s; loop timedelta: %s",
                            dt.now() - start_dt,
                            dt.now() - start_dt_batch)
                        LOGGER.info("RETRAIN: Training set size used: %s",
                                    self._model_cache[action_id].n_samples)
                    else:
                        LOGGER.warning(
                            "RETRAIN: Skipping training for individual model %s since no data is available for classes"
                            % action_id)
                LOGGER.info(
                    "RETRAIN:: Trained all models for crnt agent batch (%s of %s). Timedelta %s; loop started at: %s",
                    ag_batch_idx, ag_n_batches,
                    dt.now() - start_dt_loop, start_dt_loop)
                LOGGER.info("RETRAIN:: predictor_model configuration: %s",
                            self.predictor_model.configuration)
                if self.model_type == HYBRID:
                    if total_count / 2.0 + batch_size * (
                            batch_idx + 1) / 2.0 >= total_count:
                        if new_state:
                            model.update(state=new_state,
                                         version=(model.version or 0) + 1)
                        model.n_rows = total_count
                    predictor.save_progress(
                        model,
                        total_count / 2.0 + batch_size * (batch_idx + 1) / 2.0,
                        total_count)
                else:
                    if batch_size * (batch_idx + 1) >= total_count:
                        if new_state:
                            model.update(state=new_state,
                                         version=(model.version or 0) + 1)
                        model.n_rows = total_count
                    predictor.save_progress(model,
                                            batch_size * (batch_idx + 1),
                                            total_count)
                LOGGER.info(
                    "RETRAIN:: Done handling agent batch %s / %s (batch_size %s); Timedelta: %s",
                    ag_batch_idx, ag_n_batches, batch_size,
                    dt.now() - start_dt_batch)
            LOGGER.info("RETRAIN:: Done with local models: %s",
                        dt.now() - start_dt_local_models)
        test_samples = test_samples or local_test_samples

        import math
        from solariat_bottle.db.predictors.base_predictor import TYPE_NUMERIC, TYPE_BOOLEAN,\
            mean_squared_error, auc, roc_curve

        LOGGER.info("Computing performance metrics")
        y = []
        y_pred = []

        start_dt_metrics = dt.now()
        LOGGER.info("RETRAIN:: starting to compute metrics, len: %s",
                    len(test_samples))
        for context, action, action_id, reward in test_samples:
            predicted_score = self.score(context, [{
                KEY_DATA: action,
                ACTION_ID: action_id
            }])
            y_pred.append(predicted_score[0][1])
            # print str(predicted_score) + " WAS PREDICTED"
            if predictor.reward_type == TYPE_NUMERIC:
                y.append(reward)
                predicted_score = predicted_score[0][1]
                reward_diff = abs(reward - float(predicted_score))
                model.avg_error = (model.avg_error * float(model.nr_scores) +
                                   reward_diff) / (model.nr_scores + 1)
                model.nr_scores += 1
            elif predictor.reward_type == TYPE_BOOLEAN:
                y.append(1.0 if reward is True else 0.0)
                predicted_score = predicted_score[0][1]
                if reward and float(predicted_score) > 0.5:
                    model.true_positives += 1
                elif reward and float(predicted_score) < 0.5:
                    model.false_negatives += 1
                elif not reward and float(predicted_score) > 0.5:
                    model.false_positives += 1
                else:
                    model.true_negatives += 1
        LOGGER.info("RETRAIN:: iterated over test_samples, Timedelta: %s",
                    dt.now() - start_dt_metrics)
        start_dt_metrics = dt.now()
        if predictor.reward_type == TYPE_BOOLEAN:
            fpr, tpr, thresholds = roc_curve(y, y_pred)
            _score = "%.2f" % auc(fpr, tpr)
            model.auc = float(_score)
            LOGGER.info("RETRAIN:: auc: %s", model.auc)
        elif predictor.reward_type == TYPE_NUMERIC:
            model.mse = mean_squared_error(y, y_pred)
            model.mae = metrics.mean_absolute_error(y, y_pred)
            model.r2_score = metrics.r2_score(y, y_pred)
            _score = "%.2f" % math.sqrt(model.mse)
            model.rmse = float(_score)
            model.fraction_below_quantile = (np.array(y) <
                                             np.array(y_pred)).mean()
            LOGGER.info("RETRAIN:: rmse: %s", model.rmse)
            LOGGER.info("RETRAIN:: mse: %s", model.mse)
            LOGGER.info("RETRAIN:: fraction_below_quantile: %s",
                        model.fraction_below_quantile)

        model.save()
        LOGGER.debug(
            "RETRAIN:: Done computing performance metrics, Timedelta: %s",
            dt.now() - start_dt_metrics)

        # resetting the in-memory predictor model cache
        get_models_maps()[model.id] = self._model_cache
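The metrics block above mirrors standard scikit-learn scoring. As a toy illustration of both reward paths (assuming roc_curve, auc, and mean_squared_error carry their usual scikit-learn semantics, as the surrounding code suggests):

import numpy as np
from sklearn.metrics import roc_curve, auc, mean_squared_error

y = [1.0, 0.0, 1.0, 0.0]       # boolean rewards encoded as floats
y_pred = [0.9, 0.2, 0.6, 0.4]  # predicted scores

fpr, tpr, thresholds = roc_curve(y, y_pred)
print("auc: %.2f" % auc(fpr, tpr))                            # TYPE_BOOLEAN path
print("rmse: %.2f" % np.sqrt(mean_squared_error(y, y_pred)))  # TYPE_NUMERIC path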