Example #1
def workflow_restrict_column(column: Column) -> Optional[str]:
    """Set category of the column to the existing set of values.

    Given a column, modifies it so that only the values already present in
    the table are allowed for future updates.

    :param column: Column object to restrict

    :return: String with error or None if correct
    """
    # Load the data frame
    data_frame = load_table(column.workflow.get_data_frame_table_name())

    cat_values = set(data_frame[column.name].dropna())
    if not cat_values:
        # Column has no meaningful values. Nothing to do.
        return _('Column has no meaningful values')

    # Set categories
    column.set_categories(list(cat_values))
    column.save()

    # Re-evaluate the operands in the workflow
    column.workflow.set_query_builder_ops()
    column.workflow.save()

    # Correct execution
    return None
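
A minimal standalone sketch of the value-collection step above, using only pandas; the frame and column below are made-up illustration data:

import pandas as pd

def existing_categories(frame: pd.DataFrame, column_name: str) -> list:
    """Return the distinct non-null values of a column (empty list if none)."""
    return sorted(set(frame[column_name].dropna()), key=str)

grades = pd.DataFrame({'grade': ['A', 'B', None, 'A']})
print(existing_categories(grades, 'grade'))  # ['A', 'B']
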
Example #2
def do_clone_column_only(
    column: Column,
    new_workflow: Optional[Workflow] = None,
    new_name: Optional[str] = None,
) -> Column:
    """Clone a column.

    :param column: Object to clone.

    :param new_workflow: Optional new workflow object to link to.

    :param new_name: Optional new name to use.

    :return: New object.
    """
    if new_name is None:
        new_name = column.name
    if new_workflow is None:
        new_workflow = column.workflow

    new_column = Column(
        name=new_name,
        description_text=column.description_text,
        workflow=new_workflow,
        data_type=column.data_type,
        is_key=column.is_key,
        position=column.position,
        in_viz=column.in_viz,
        categories=copy.deepcopy(column.categories),
        active_from=column.active_from,
        active_to=column.active_to,
    )
    new_column.save()
    return new_column
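
The clone-with-optional-overrides pattern above can be illustrated with a plain dataclass, independent of the Django model; everything below is made up for illustration:

import copy
from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class SimpleColumn:
    name: str
    categories: List[str] = field(default_factory=list)

def clone_simple_column(
    column: SimpleColumn,
    new_name: Optional[str] = None,
) -> SimpleColumn:
    """Copy a column, defaulting the name to the original one."""
    return SimpleColumn(
        name=new_name if new_name is not None else column.name,
        # Deep-copy mutable fields so the clone does not share state
        categories=copy.deepcopy(column.categories),
    )

original = SimpleColumn('score', ['low', 'high'])
clone = clone_simple_column(original, new_name='score_copy')
assert clone.categories == original.categories
assert clone.categories is not original.categories
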
Example #3
    def clean(self):
        """Validate the initial value."""
        form_data = super().clean()

        # Try to convert the initial value to the right type
        initial_value = form_data['initial_value']
        if initial_value:
            # See if the given value is allowed for the column data type
            try:
                self.initial_valid_value = Column.validate_column_value(
                    form_data['data_type'],
                    initial_value,
                )
            except ValueError:
                self.initial_valid_value = None
                self.add_error(
                    'initial_value',
                    _('Incorrect initial value'),
                )

            categories = self.instance.get_categories()
            if (
                self.initial_valid_value is not None
                and categories
                and self.initial_valid_value not in categories
            ):
                self.add_error(
                    'initial_value',
                    _('This value is not in the list of allowed values'),
                )

        # Check and force a correct column index
        ncols = self.workflow.columns.count()
        if form_data['position'] < 1 or form_data['position'] > ncols:
            form_data['position'] = ncols + 1

        return form_data
Example #4
def create_track_column(action: Action) -> str:
    """Create an additional column for email tracking.

    :param action: Action to consider
    :return: column name
    """
    # Make sure the column name does not collide with an existing one
    idx = 0  # Suffix to rename
    cnames = [col.name for col in action.workflow.columns.all()]
    while True:
        idx += 1
        track_col_name = 'EmailRead_{0}'.format(idx)
        if track_col_name not in cnames:
            break

    # Create the new column and store it (before the mass email, to avoid
    # overload)
    column = Column(
        name=track_col_name,
        description_text='Emails sent with action {0} on {1}'.format(
            action.name,
            simplify_datetime_str(
                datetime.datetime.now(pytz.timezone(
                    ontask_settings.TIME_ZONE))),
        ),
        workflow=action.workflow,
        data_type='integer',
        is_key=False,
        position=action.workflow.ncols + 1,
    )
    column.save()

    # Increase the number of columns in the workflow
    action.workflow.ncols += 1
    action.workflow.save()

    # Add the column to the DB table
    add_column_to_db(action.workflow.get_data_frame_table_name(),
                     track_col_name, 'integer', 0)

    return track_col_name
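
The collision-avoiding suffix loop can be factored into a small standalone helper; this is a hypothetical refactoring for illustration, not part of OnTask:

from typing import Iterable

def next_available_name(existing: Iterable[str], prefix: str = 'EmailRead_') -> str:
    """Return the first '<prefix><n>' not present among the existing names."""
    taken = set(existing)
    idx = 0
    while True:
        idx += 1
        candidate = '{0}{1}'.format(prefix, idx)
        if candidate not in taken:
            return candidate

print(next_available_name(['EmailRead_1', 'EmailRead_2']))  # EmailRead_3
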
Example #5
    def create(self, validated_data, **kwargs):

        # Preliminary checks
        data_type = validated_data.get('data_type', None)
        if data_type is None or \
                data_type not in pandas_datatype_names.values():
            # The data type is not legal
            raise Exception(_('Incorrect data type {0}.').format(data_type))

        column_obj = None
        try:
            # Create the object, but point to the given workflow
            column_obj = Column(
                name=validated_data['name'],
                description_text=validated_data.get('description_text', ''),
                workflow=self.context['workflow'],
                data_type=data_type,
                is_key=validated_data.get('is_key', False),
                position=validated_data.get('position', 0),
                in_viz=validated_data.get('in_viz', True),
                active_from=validated_data.get('active_from', None),
                active_to=validated_data.get('active_to', None),
            )

            # Set the categories if they exist
            column_obj.set_categories(validated_data.get('categories', []),
                                      True)

            if column_obj.active_from and column_obj.active_to and \
                    column_obj.active_from > column_obj.active_to:
                raise Exception(
                    _('Incorrect date/times in the active window for '
                      'column {0}').format(validated_data['name']))

            # TODO: Fix the position field when creating the columns
            # All tests passed, proceed to save the object.
            column_obj.save()
        except Exception:
            if column_obj:
                column_obj.delete()
            raise

        return column_obj
Example #6
def send_messages(user, action, subject, email_column, canvas_id_column,
                  from_email, send_to_canvas, send_confirmation, track_read):
    """
    Performs the submission of the emails for the given action and with the
    given subject. The subject will be evaluated also with respect to the
    rows, attributes, and conditions.
    :param user: User object that executed the action
    :param action: Action from where to take the messages
    :param subject: Email subject
    :param email_column: Name of the column from which to extract emails
    :param from_email: Email of the sender
    :param send_confirmation: Boolean to send confirmation to sender
    :param track_read: Should read tracking be included?
    :return: Send the emails
    """

    # Evaluate the action string, evaluate the subject, and get the value of
    # the email column.
    result = evaluate_action(action,
                             extra_string=subject,
                             column_name=email_column)
    # Recipients for the message sent to the Canvas inbox
    recipients_list = []
    result_2nd = evaluate_action_canvas(action,
                                        extra_string=subject,
                                        column_name=canvas_id_column)

    # Check the type of the result to see if it was successful
    if not isinstance(result, list):
        # Something went wrong. The result contains a message
        return result

    track_col_name = ''
    data_frame = None
    if track_read:
        data_frame = pandas_db.load_from_db(action.workflow.id)
        # Make sure the column name does not collide with an existing one
        i = 0  # Suffix to rename
        while True:
            i += 1
            track_col_name = 'EmailRead_{0}'.format(i)
            if track_col_name not in data_frame.columns:
                break

    # CC the message to the Canvas conversations API if send_to_canvas is set
    if send_to_canvas:
        msg_subject = ''
        msg_body = ''
        for msg_body, msg_subject, msg_to in result_2nd:
            # Collect every recipient; the subject and body of the last
            # message are reused for the single conversation posted below.
            recipients_list.append(str(msg_to))

        try:
            # Send a plain text copy to the Canvas inbox
            p = post_to_canvas_api()
            p.set_payload(True, '', '', recipients_list, msg_subject,
                          html2text.html2text(msg_body))
            r = p.post_to_conversation(p.access_token, p.payload)
            print(r.status_code)
            print(r.text)
        except Exception as e:
            # Something went wrong, notify above
            return str(e)

    # Update the number of filtered rows if the action has a filter (table
    # might have changed)
    filter = action.conditions.filter(is_filter=True).first()
    if filter and filter.n_rows_selected != len(result):
        filter.n_rows_selected = len(result)
        filter.save()

    # Everything seemed to work to create the messages.
    msgs = []
    for msg_body, msg_subject, msg_to in result:

        # If read tracking is on, add suffix for message (or empty)
        if track_read:
            # The track id must identify: action & user
            track_id = {
                'action': action.id,
                'sender': user.email,
                'to': msg_to,
                'column_to': email_column,
                'column_dst': track_col_name
            }

            track_str = \
                """<img src="https://{0}{1}{2}?v={3}" alt="" 
                    style="position:absolute; visibility:hidden"/>""".format(
                    Site.objects.get_current().domain,
                    ontask_settings.BASE_URL,
                    reverse('trck'),
                    signing.dumps(track_id)
                )
        else:
            track_str = ''

        # Get the plain text content and bundle it together with the HTML in
        # a message to be added to the list.
        text_content = strip_tags(msg_body)
        msg = EmailMultiAlternatives(msg_subject, text_content, from_email,
                                     [msg_to])
        msg.attach_alternative(msg_body + track_str, "text/html")
        msgs.append(msg)

    # Mass mail!
    try:
        connection = mail.get_connection()
        connection.send_messages(msgs)
    except Exception as e:
        # Something went wrong, notify above
        return str(e)

    # Add the column if needed
    if track_read:
        # Create the new column and store
        column = Column(name=track_col_name,
                        workflow=action.workflow,
                        data_type='integer',
                        is_key=False,
                        position=action.workflow.ncols + 1)
        column.save()

        # Increase the number of columns in the workflow
        action.workflow.ncols += 1
        action.workflow.save()

        # Initial value in the data frame and store the table
        data_frame[track_col_name] = 0
        ops.store_dataframe_in_db(data_frame, action.workflow.id)

    # Log the events (one per email)
    now = datetime.datetime.now(pytz.timezone(ontask_settings.TIME_ZONE))
    context = {
        'user': user.id,
        'action': action.id,
        'email_sent_datetime': str(now),
    }
    for msg in msgs:
        context['subject'] = msg.subject
        context['body'] = msg.body
        context['from_email'] = msg.from_email
        context['to_email'] = msg.to[0]
        logs.ops.put(user, 'action_email_sent', action.workflow, context)

    # Log the event
    logs.ops.put(
        user, 'action_email_sent', action.workflow, {
            'user': user.id,
            'action': action.name,
            'num_messages': len(msgs),
            'email_sent_datetime': str(now),
            'filter_present': filter is not None,
            'num_rows': action.workflow.nrows,
            'subject': subject,
            'from_email': user.email
        })

    # If no confirmation email is required, done
    if not send_confirmation:
        return None

    # Creating the context for the personal email
    context = {
        'user': user,
        'action': action,
        'num_messages': len(msgs),
        'email_sent_datetime': now,
        'filter_present': filter is not None,
        'num_rows': action.workflow.nrows,
        'num_selected': filter.n_rows_selected if filter else -1
    }

    # Create template and render with context
    try:
        html_content = Template(str(getattr(settings,
                                            'NOTIFICATION_TEMPLATE'))).render(
                                                Context(context))
        text_content = strip_tags(html_content)
    except TemplateSyntaxError as e:
        return 'Syntax error detected in OnTask notification template ' \
               '({0})'.format(e)

    # Log the event
    logs.ops.put(
        user, 'action_email_notify', action.workflow, {
            'user': user.id,
            'action': action.id,
            'num_messages': len(msgs),
            'email_sent_datetime': str(now),
            'filter_present': filter is not None,
            'num_rows': action.workflow.nrows,
            'subject': str(getattr(settings, 'NOTIFICATION_SUBJECT')),
            'body': text_content,
            'from_email': str(getattr(settings, 'NOTIFICATION_SENDER')),
            'to_email': [user.email]
        })

    # Send email out
    try:
        send_mail(str(getattr(settings, 'NOTIFICATION_SUBJECT')),
                  text_content,
                  str(getattr(settings, 'NOTIFICATION_SENDER')), [user.email],
                  html_message=html_content)
    except Exception as e:
        return 'An error occurred when sending your notification: ' + str(e)

    return None
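
The tracking pixel above carries a payload signed with django.core.signing. A minimal sketch of that round trip, runnable outside a full OnTask installation by configuring a throwaway SECRET_KEY (all values are made up):

from django.conf import settings

# Stand-alone configuration; a real deployment already provides these settings
settings.configure(SECRET_KEY='throwaway-secret-for-illustration')

from django.core import signing

track_id = {
    'action': 1,
    'sender': 'instructor@example.com',
    'to': 'learner@example.com',
}
token = signing.dumps(track_id)          # value placed after ?v= in the pixel URL
assert signing.loads(token) == track_id  # the tracking view recovers the payload
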
Example #7
def store_table_in_db(data_frame, pk, table_name, temporary=False):
    """
    Update or create a table in the DB with the data in the data frame. It
    also updates the corresponding column information

    :param data_frame: Data frame to dump to DB
    :param pk: Corresponding primary key of the workflow
    :param table_name: Table to use in the DB
    :param temporary: Boolean stating if the table is temporary,
           or it belongs to an existing workflow.
    :return: If temporary = True, then return a list with three lists:
             - column names
             - column types
             - column is unique
             If temporary = False, return None. All this info is stored in
             the workflow
    """

    if settings.DEBUG:
        print('Storing table ', table_name)

    # get column names
    df_column_names = list(data_frame.columns)

    # if the data frame is temporary, the procedure is much simpler
    if temporary:
        # Check if the columns have unique values per row
        column_unique = are_unique_columns(data_frame)

        # Store the table in the DB
        store_table(data_frame, table_name)

        # Get the column types
        df_column_types = df_column_types_rename(table_name)

        # Return a list with three lists with information about the
        # data frame that will be needed in the next steps
        return [df_column_names, df_column_types, column_unique]

    # We are modifying an existing DF

    # Get the workflow and its columns
    workflow = Workflow.objects.get(id=pk)
    wf_col_names = Column.objects.filter(workflow__id=pk).values_list(
        "name", flat=True)

    # Loop over the columns in the data frame and reconcile the column info
    # with the column objects attached to the WF
    for cname in df_column_names:
        # See if this is a new column
        if cname in wf_col_names:
            # If column already exists in wf_col_names, no need to do anything
            continue

        # Create the new column
        column = Column(
            name=cname,
            workflow=workflow,
            data_type=pandas_datatype_names[data_frame[cname].dtype.name],
            is_key=is_unique_column(data_frame[cname]),
            position=Column.objects.filter(workflow=workflow).count() + 1,
        )
        column.save()

    # Get now the new set of columns with names
    wf_columns = Column.objects.filter(workflow__id=pk)

    # Reorder the columns in the data frame
    data_frame = data_frame[[x.name for x in wf_columns]]

    # Store the table in the DB
    store_table(data_frame, table_name)

    # Review the column types because some "objects" are stored as booleans
    column_types = df_column_types_rename(table_name)
    for ctype, col in zip(column_types, wf_columns):
        if col.data_type != ctype:
            # If the column type in the DB is different from the one in the
            # object, update
            col.data_type = ctype
            col.save()

    # Update workflow fields and save
    workflow.nrows = data_frame.shape[0]
    workflow.ncols = data_frame.shape[1]
    workflow.set_query_builder_ops()
    workflow.data_frame_table_name = table_name
    workflow.save()

    return None
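
A hypothetical version of the pandas_datatype_names mapping used above; the real dictionary lives elsewhere in the code base and may contain more entries:

import pandas as pd

PANDAS_TYPE_NAMES = {
    'object': 'string',
    'int64': 'integer',
    'float64': 'double',
    'bool': 'boolean',
    'datetime64[ns]': 'datetime',
}

frame = pd.DataFrame({'name': ['ann', 'bob'], 'score': [1.5, 2.0]})
print({col: PANDAS_TYPE_NAMES[frame[col].dtype.name] for col in frame.columns})
# {'name': 'string', 'score': 'double'}
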
Example #8
def store_table_in_db(data_frame,
                      pk,
                      table_name,
                      temporary=False,
                      reset_keys=True):
    """
    Update or create a table in the DB with the data in the data frame. It
    also updates the corresponding column information

    :param data_frame: Data frame to dump to DB
    :param pk: Corresponding primary key of the workflow
    :param table_name: Table to use in the DB
    :param temporary: Boolean stating if the table is temporary,
           or it belongs to an existing workflow.
    :param reset_keys: Reset the value of the field is_key computing it from
           scratch
    :return: If temporary = True, then return a list with three lists:
             - column names
             - column types
             - column is unique
             If temporary = False, return None. All this info is stored in
             the workflow
    """

    if settings.DEBUG:
        print('Storing table ', table_name)

    # get column names
    df_column_names = list(data_frame.columns)

    # if the data frame is temporary, the procedure is much simpler
    if temporary:
        # Check if the columns have unique values per row
        column_unique = are_unique_columns(data_frame)

        # Store the table in the DB
        store_table(data_frame, table_name)

        # Get the column types
        df_column_types = df_column_types_rename(table_name)

        # Return a list with three lists with information about the
        # data frame that will be needed in the next steps
        return [df_column_names, df_column_types, column_unique]

    # We are modifying an existing DF

    # Get the workflow and its columns
    workflow = Workflow.objects.get(id=pk)
    wf_cols = workflow.columns.all()

    # Loop over the columns in the Workflow to refresh the is_key value. There
    # may be values that have been added to the column, so this field needs to
    # be reassessed
    for col in wf_cols:
        if reset_keys:
            new_val = is_unique_column(data_frame[col.name])
            if col.is_key and not new_val:
                # Only clear the is_key value when the column claims to be a
                # key but the values say otherwise. The opposite case (is_key
                # false in the column) is left untouched, as it may have been
                # set deliberately by the user
                col.is_key = new_val
                col.save()

        # Remove this column name from df_column_names; no further processing
        # is needed for it.
        df_column_names.remove(col.name)

    # Loop over the remaining columns in the data frame and create the new
    # column objects in the workflow
    for cname in df_column_names:
        # Create the new column
        column = Column(
            name=cname,
            workflow=workflow,
            data_type=pandas_datatype_names[data_frame[cname].dtype.name],
            is_key=is_unique_column(data_frame[cname]),
            position=workflow.columns.count() + 1
        )
        column.save()

    # Get the new set of columns with names
    wf_columns = workflow.columns.all()

    # Reorder the columns in the data frame
    data_frame = data_frame[[x.name for x in wf_columns]]

    # Store the table in the DB
    store_table(data_frame, table_name)

    # Review the column types because some "objects" are stored as booleans
    column_types = df_column_types_rename(table_name)
    for ctype, col in zip(column_types, wf_columns):
        if col.data_type != ctype:
            # If the column type in the DB is different from the one in the
            # object, update
            col.data_type = ctype
            col.save()

    # Update workflow fields and save
    workflow.nrows = data_frame.shape[0]
    workflow.ncols = data_frame.shape[1]
    workflow.set_query_builder_ops()
    workflow.data_frame_table_name = table_name
    workflow.save()

    return None
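
A minimal sketch of a uniqueness check in the spirit of is_unique_column (the actual OnTask helper may be implemented differently): a column can serve as a key when all of its values are present and distinct.

import pandas as pd

def column_is_unique(series: pd.Series) -> bool:
    """True when the column has no missing values and no duplicates."""
    return bool(series.notnull().all() and series.nunique() == len(series))

frame = pd.DataFrame({'sid': [1, 2, 3], 'grade': ['A', 'A', 'B']})
print([name for name in frame.columns if column_is_unique(frame[name])])  # ['sid']
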
Example #9
def send_messages(user, action, subject, email_column, from_email,
                  cc_email_list, bcc_email_list, send_confirmation, track_read,
                  exclude_values, log_item):
    """
    Sends the emails for the given action and with the
    given subject. The subject will be evaluated also with respect to the
    rows, attributes, and conditions.

    The messages are sent in bursts, with a pause in seconds between them, as
    specified by the configuration variables EMAIL_BURST and EMAIL_BURST_PAUSE.

    :param user: User object that executed the action
    :param action: Action from where to take the messages
    :param subject: Email subject
    :param email_column: Name of the column from which to extract emails
    :param from_email: Email of the sender
    :param cc_email_list: List of emails to include in the CC
    :param bcc_email_list: List of emails to include in the BCC
    :param send_confirmation: Boolean to send confirmation to sender
    :param track_read: Should read tracking be included?
    :param exclude_values: List of values to exclude from the mailing
    :param log_item: Log object to store results
    :return: None if everything went well, or a string with the error
    """

    # Evaluate the action string, evaluate the subject, and get the value of
    # the email column.
    result = evaluate_action(action,
                             extra_string=subject,
                             column_name=email_column,
                             exclude_values=exclude_values)

    # Check the type of the result to see if it was successful
    if not isinstance(result, list):
        # Something went wrong. The result contains a message
        return result

    track_col_name = ''
    data_frame = None
    if track_read:
        data_frame = pandas_db.load_from_db(action.workflow.id)
        # Make sure the column name does not collide with an existing one
        i = 0  # Suffix to rename
        while True:
            i += 1
            track_col_name = 'EmailRead_{0}'.format(i)
            if track_col_name not in data_frame.columns:
                break

        # Get the log item payload to store the tracking column
        log_item.payload['track_column'] = track_col_name
        log_item.save()

    # Update the number of filtered rows if the action has a filter (table
    # might have changed)
    cfilter = action.get_filter()
    if cfilter and cfilter.n_rows_selected != len(result):
        cfilter.n_rows_selected = len(result)
        cfilter.save()

    # Set the cc_email_list and bcc_email_list to the right values
    if not cc_email_list:
        cc_email_list = []
    if not bcc_email_list:
        bcc_email_list = []

    # Check that cc and bcc contain list of valid email addresses
    if not all([validate_email(x) for x in cc_email_list]):
        return _('Invalid email address in cc email')
    if not all([validate_email(x) for x in bcc_email_list]):
        return _('Invalid email address in bcc email')

    # Everything seemed to work to create the messages.
    msgs = []
    track_ids = []
    for msg_body, msg_subject, msg_to in result:

        # If read tracking is on, add suffix for message (or empty)
        if track_read:
            # The track id must identify: action & user
            track_id = {
                'action': action.id,
                'sender': user.email,
                'to': msg_to,
                'column_to': email_column,
                'column_dst': track_col_name
            }

            track_str = \
                """<img src="https://{0}{1}{2}?v={3}" alt="" 
                    style="position:absolute; visibility:hidden"/>""".format(
                    Site.objects.get_current().domain,
                    ontask_settings.BASE_URL,
                    reverse('trck'),
                    signing.dumps(track_id)
                )
        else:
            track_str = ''

        # Get the plain text content and bundle it together with the HTML in
        # a message to be added to the list.
        text_content = html2text.html2text(msg_body)
        msg = EmailMultiAlternatives(msg_subject,
                                     text_content,
                                     from_email, [msg_to],
                                     bcc=bcc_email_list,
                                     cc=cc_email_list)
        msg.attach_alternative(msg_body + track_str, "text/html")
        msgs.append(msg)
        track_ids.append(track_str)

    # Add the column if needed (before the mass email, to avoid overload)
    if track_read:
        # Create the new column and store
        column = Column(
            name=track_col_name,
            description_text='Emails sent with action {0} on {1}'.format(
                action.name, str(timezone.now())),
            workflow=action.workflow,
            data_type='integer',
            is_key=False,
            position=action.workflow.ncols + 1)
        column.save()

        # Increase the number of columns in the workflow
        action.workflow.ncols += 1
        action.workflow.save()

        # Initial value in the data frame and store the table
        data_frame[track_col_name] = 0
        ops.store_dataframe_in_db(data_frame, action.workflow.id)

    # Partition the list of emails into chunks as per the value of EMAIL_BURST
    chunk_size = len(msgs)
    wait_time = 0
    if ontask_settings.EMAIL_BURST:
        chunk_size = ontask_settings.EMAIL_BURST
        wait_time = ontask_settings.EMAIL_BURST_PAUSE
    msg_chunks = [
        msgs[i:i + chunk_size] for i in range(0, len(msgs), chunk_size)
    ]
    for idx, msg_chunk in enumerate(msg_chunks):
        # Mass mail!
        try:
            connection = mail.get_connection()
            connection.send_messages(msg_chunk)
        except Exception as e:
            # Something went wrong, notify above
            return str(e)

        if idx != len(msg_chunks) - 1:
            logger.info(
                'Email Burst ({0}) reached. Waiting for {1} secs'.format(
                    len(msg_chunk), wait_time))
            sleep(wait_time)

    # Log the events (one per email)
    now = datetime.datetime.now(pytz.timezone(ontask_settings.TIME_ZONE))
    context = {
        'user': user.id,
        'action': action.id,
        'email_sent_datetime': str(now),
    }
    for msg, track_id in zip(msgs, track_ids):
        context['subject'] = msg.subject
        context['body'] = msg.body
        context['from_email'] = msg.from_email
        context['to_email'] = msg.to[0]
        if track_id:
            context['track_id'] = track_id
        Log.objects.register(user, Log.ACTION_EMAIL_SENT, action.workflow,
                             context)

    # Update data in the log item
    log_item.payload['objects_sent'] = len(result)
    log_item.payload['filter_present'] = cfilter is not None
    log_item.payload['datetime'] = str(
        datetime.datetime.now(pytz.timezone(ontask_settings.TIME_ZONE)))
    log_item.save()

    # If no confirmation email is required, done
    if not send_confirmation:
        return None

    # Creating the context for the confirmation email
    context = {
        'user': user,
        'action': action,
        'num_messages': len(msgs),
        'email_sent_datetime': now,
        'filter_present': cfilter is not None,
        'num_rows': action.workflow.nrows,
        'num_selected': cfilter.n_rows_selected if cfilter else -1
    }

    # Create template and render with context
    try:
        html_content = Template(str(getattr(settings,
                                            'NOTIFICATION_TEMPLATE'))).render(
                                                Context(context))
        text_content = strip_tags(html_content)
    except TemplateSyntaxError as e:
        return _('Syntax error detected in OnTask notification template '
                 '({0})').format(e)

    # Log the event
    Log.objects.register(
        user, Log.ACTION_EMAIL_NOTIFY, action.workflow, {
            'user': user.id,
            'action': action.id,
            'num_messages': len(msgs),
            'email_sent_datetime': str(now),
            'filter_present': cfilter is not None,
            'num_rows': action.workflow.nrows,
            'subject': str(getattr(settings, 'NOTIFICATION_SUBJECT')),
            'body': text_content,
            'from_email': str(getattr(settings, 'NOTIFICATION_SENDER')),
            'to_email': [user.email]
        })

    # Send email out
    try:
        send_mail(str(getattr(settings, 'NOTIFICATION_SUBJECT')),
                  text_content,
                  str(getattr(settings, 'NOTIFICATION_SENDER')), [user.email],
                  html_message=html_content)
    except Exception as e:
        return _('An error occurred when sending your notification: '
                 '{0}').format(e)

    return None
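
The burst logic above, sketched as a standalone helper with made-up values (the real code reads EMAIL_BURST and EMAIL_BURST_PAUSE from the settings):

from time import sleep

def send_in_bursts(msgs, send, burst_size=0, pause_seconds=0):
    """Send msgs in chunks of burst_size, pausing between consecutive chunks."""
    chunk_size = burst_size if burst_size else max(len(msgs), 1)
    chunks = [msgs[i:i + chunk_size] for i in range(0, len(msgs), chunk_size)]
    for idx, chunk in enumerate(chunks):
        send(chunk)
        if idx != len(chunks) - 1:
            sleep(pause_seconds)

send_in_bursts(list(range(5)), print, burst_size=2)
# [0, 1]
# [2, 3]
# [4]
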
Example #10
def workflow_delete_column(
    workflow: Workflow,
    column: Column,
    cond_to_delete: Optional[List[Condition]] = None,
):
    """Remove column from workflow.

    Given a workflow and a column, removes it from the workflow (and from the
    corresponding data frame).

    :param workflow: Workflow object

    :param column: Column object to delete

    :param cond_to_delete: List of conditions to delete after removing the
    column

    :return: Nothing. Effect reflected in the database
    """
    # Drop the column from the DB table storing the data frame
    df_drop_column(workflow.get_data_frame_table_name(), column.name)

    # Reposition the columns above the one being deleted
    workflow.reposition_columns(column.position, workflow.ncols + 1)

    # Delete the column
    column.delete()

    # Update the information in the workflow
    workflow.ncols = workflow.ncols - 1
    workflow.save()

    if not cond_to_delete:
        # The conditions to delete are not given, so calculate them
        # Get the conditions/actions attached to this workflow
        cond_to_delete = [
            cond
            for cond in Condition.objects.filter(action__workflow=workflow, )
            if column in cond.columns.all()
        ]

    # If a column disappears, the conditions that contain that variable
    # are removed
    actions_without_filters = []
    for condition in cond_to_delete:
        if condition.is_filter:
            actions_without_filters.append(condition.action)

        # Formula has the name of the deleted column. Delete it
        condition.delete()

    # Traverse the actions for which the filter has been deleted and reassess
    # all their conditions
    # TODO: Explore how to do this asynchronously (or lazy)
    for act in actions_without_filters:
        act.update_n_rows_selected()

    # If a column disappears, the views that contain only that column need to
    # disappear as well as they are no longer relevant.
    for view in workflow.views.all():
        if view.columns.count() == 0:
            view.delete()
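
A hypothetical sketch of what the repositioning step accomplishes (the real reposition_columns lives in the Workflow model and may behave differently): columns after the deleted position shift one place up.

def positions_after_delete(positions, deleted_position):
    """Shift every position above the deleted one down by one slot."""
    return {
        name: pos - 1 if pos > deleted_position else pos
        for name, pos in positions.items()
        if pos != deleted_position
    }

print(positions_after_delete({'age': 1, 'email': 2, 'grade': 3}, 2))
# {'age': 1, 'grade': 2}
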
Example #11
    def clean(self):
        """Check that the name is legal and the categories have right value."""
        form_data = super().clean()

        # Load the data frame from the DB for various checks and leave it in
        # the form for future use
        self.data_frame = load_table(
            self.workflow.get_data_frame_table_name())

        # Column name must be a legal variable name
        if 'name' in self.changed_data:
            # The name must be a legal variable name
            msg = is_legal_name(form_data['name'])
            if msg:
                self.add_error('name', msg)
                return form_data

            # Check that the name is not present already
            if next(
                (col for col in self.workflow.columns.all()
                 if col.id != self.instance.id
                    and col.name == form_data['name']),
                None,
            ):
                # New column name collides with existing one
                self.add_error(
                    'name',
                    _('There is a column already with this name'))
                return form_data

        # Categories must be valid types
        if 'raw_categories' in self.changed_data:
            if form_data['raw_categories']:
                # Condition 1: Values must be valid for the type of the column
                category_values = [
                    cat.strip()
                    for cat in form_data['raw_categories'].split(',')]
                try:
                    valid_values = Column.validate_column_values(
                        form_data['data_type'],
                        category_values)
                except ValueError:
                    self.add_error(
                        'raw_categories',
                        _('Incorrect list of values'),
                    )
                    return form_data

                # Condition 2: The values in the dataframe column must be in
                # these categories (only if the column is being edited, though)
                if self.instance.name and not all(
                    vval in valid_values
                    for vval in self.data_frame[self.instance.name]
                    if vval and not pd.isnull(vval)
                ):
                    self.add_error(
                        'raw_categories',
                        _(
                            'The values in the column are not compatible '
                            'with these ones.'))
                    return form_data
            else:
                valid_values = []

            self.instance.set_categories(valid_values)

        # Check the datetimes. One needs to be after the other
        a_from = self.cleaned_data['active_from']
        a_to = self.cleaned_data['active_to']
        if a_from and a_to and a_from >= a_to:
            self.add_error(
                'active_from',
                _('Incorrect date/time window'))
            self.add_error(
                'active_to',
                _('Incorrect date/time window'))

        return form_data