Example #1
def convolute_row(source_ps, dest_ps, source_matrix_quadrant, row):
    '''Map (96,384)-well input row to (384,1536)-well output row '''

    factor = dest_ps / source_ps
    if factor != 4:
        raise ProgrammingError('convolute may only be used for '
                               'dest_ps/source_ps == 4: %d/%d' %
                               (dest_ps, source_ps))
    if source_matrix_quadrant not in [0, 1, 2, 3]:
        raise ProgrammingError('source_matrix_quadrant must be 0<=n<4: %r' %
                               source_matrix_quadrant)
    return row * factor / 2 + source_matrix_quadrant / (factor / 2)
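A quick illustration of the arithmetic above (a hypothetical sketch, not part of the example): with factor 4 and integer division, each source well expands into a 2x2 block of the destination plate, the quadrant index choosing the sub-position; convolute_col (Example #10 below) supplies the matching column formula.

# Hypothetical helper (invented name) combining the row formula above with its
# Example #10 column counterpart, using integer division.
def _convolute(row, col, quadrant, factor=4):
    half = factor // 2                      # 2 for the 96->384 and 384->1536 cases
    return row * half + quadrant // half, col * half + quadrant % half

# Source well (row=0, col=0) fills the four wells of its 2x2 destination block.
assert [_convolute(0, 0, q) for q in range(4)] == [(0, 0), (0, 1), (1, 0), (1, 1)]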
Example #2
 def get_token(self):
     """
     Query the API to retrieve an OAuth token.
     :return: the token from the API
     :rtype: Token
     """
     conn = self.databasewrapper.cursor()
     # Build the client_credentials grant parameters
     params = {'grant_type': 'client_credentials'}
     response = conn.session.request(
         'POST',
         self.url_token,
         params=params,
         auth=(self.settings_dict['USER'], self.settings_dict['PASSWORD']),
         stream=False
     )
     if response.status_code != 200:
         raise ProgrammingError("unable to retrive the oauth token from %s: %s" %
                                (self.url_token,
                                 message_from_response(response))
                                )
     data = response.json()
     return Token(
         datetime.datetime.now() + datetime.timedelta(seconds=data['expires_in']),
         data['access_token'],
         data['token_type'],
         data['scope']
     )
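For context, this is the shape of the client_credentials response body the method expects (the field names are exactly the ones read from response.json() above; the values are invented):

import datetime

# Hypothetical token endpoint response body (values made up).
data = {'access_token': 'abc123', 'token_type': 'Bearer',
        'expires_in': 3600, 'scope': 'read write'}
# The expiry stored on the Token is "now" plus the advertised lifetime.
expires_at = datetime.datetime.now() + datetime.timedelta(seconds=data['expires_in'])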
Example #3
    def resolve_ids(self):
        """
        Attempt to resolve the ids for the current query. If the query is too
        complex to guess the ids, it will execute the query to fetch them.
        :return: the set of resolved ids
        """
        ids = self.query_parser.resolve_ids()
        if ids is None:

            pk_name = self.query.get_meta().pk.name
            params = {
                'exclude[]': '*',
                'include[]': pk_name,
            }
            params.update(self.build_filter_params())
            result = self.connection.cursor().get(get_resource_path(
                self.query.model),
                                                  params=params)
            if result.status_code != 200:
                raise ProgrammingError(
                    "error while querying the database : %s" % result.text)
            ids = {
                res['id']
                for res in result.json()[get_resource_name(self.query.model,
                                                           many=True)]
            }
        return ids
Example #4
    def fabs_cursor(limit=None):
        db_cursor = connections['default'].cursor()
        db_query = """
            WITH deletable AS (
                SELECT  afa_generated_unique,
                        bool_or(is_active) AS is_active,
                        bool_or(correction_late_delete_ind in ('d', 'D'))
                            AS deleted
                FROM    broker.published_award_financial_assistance
                GROUP BY 1
                HAVING  bool_or(is_active) = false
                AND     bool_or(correction_late_delete_ind in ('d', 'D'))
                )
            SELECT tf.afa_generated_unique
            FROM   transaction_fabs tf
            JOIN   deletable d USING (afa_generated_unique)
            """

        if limit:
            db_query += ' LIMIT {}'.format(limit)
        try:
            db_cursor.execute(db_query)
        except ProgrammingError as e:
            if 'broker.published_award_financial_assistance' in str(e):
                msg = str(e) + '\nRun database_scripts/broker_matviews/broker_server.sql\n\n'
                raise ProgrammingError(msg)
            else:
                raise

        return db_cursor
Example #5
    def autoinc_sql(self, table, column):
        if not self.sql_create_sequence and not self.sql_create_trigger:
            return None

        model, field = self._enforce_model_field_instances(table, column)
        sequence_name = self._get_sequence_name(model, field)
        trigger_name = self._get_trigger_name(model, field)

        args = {
            'sq_name': sequence_name,
            'tr_name': trigger_name,
            'tbl_name': self.quote_name(table),
            'col_name': self.quote_name(column),
        }

        try:
            _, max_value = self.integer_field_range(field.get_internal_type())
            args.update(sq_max_value=max_value)
        except KeyError:
            pass

        try:
            sequence_sql = self._get_sequence_sql(sequence_name, args)
            trigger_sql = self.sql_create_trigger % args
            return [*sequence_sql, trigger_sql]
        except KeyError as err:
            if 'sq_max_value' in err.args:
                raise ProgrammingError(
                    'Cannot retrieve the range of the column type bound to the '
                    'field %s' % field.name)
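For orientation, sql_create_trigger is a %-style template filled from the args dict built above; the rough sketch below uses an invented template (the backend's real SQL is not shown in this listing) and makes the KeyError handling clearer: a placeholder missing from args is what raises it.

# Hypothetical trigger template (invented; not the backend's real SQL), filled
# with %-style named placeholders from an args dict like the one above.
sql_create_trigger = ('CREATE TRIGGER %(tr_name)s BEFORE INSERT ON %(tbl_name)s '
                      'FOR EACH ROW SET NEW.%(col_name)s = NEXTVAL(%(sq_name)s)')
args = {'sq_name': 'author_id_seq', 'tr_name': 'author_id_trg',
        'tbl_name': '"AUTHOR"', 'col_name': '"ID"'}
print(sql_create_trigger % args)   # a placeholder missing from args raises KeyError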
Example #6
    def aggregate():
        query = objects.filter(q_filters)
        fields = []
        annotations = {
            'value':
            getattr(aggregates,
                    aggregation_function.value.title())(field.value),
        }

        try:
            if group_by:
                fields += [g.value for g in group_by]
                query = query.values(*fields).annotate(**annotations)
                return list(pagination.query(query).values(*fields, 'value'))

            else:
                query = query.aggregate(**annotations)
                return [query]

        except ProgrammingError as error:
            if error.__cause__.pgcode in (
                    psycopg2_error_codes.UNDEFINED_FUNCTION, ):
                raise RequestValidationError([
                    ErrorWrapper(ProgrammingError(),
                                 ("query", "aggregation_function"))
                ]) from error

            raise
Example #7
    def __init__(self, ctx, to_email):
        if self.template is None:
            raise ProgrammingError("Email subclasses must set template")

        self.to_email = to_email

        ctx = {'BASE_URL': settings.BASE_URL, **ctx}
        self.ctx = ctx

        # Templates can end up with unwanted newlines. Convert them all to spaces
        self.subject: str = render_block_to_string(self.template, 'subject',
                                                   ctx).strip()
        self.subject = re.sub(r'\s+', ' ', self.subject)

        # Make links absolute
        self.html = render_block_to_string(self.template, 'html', ctx)
        self._make_links_absolute()

        try:
            self.plain = render_block_to_string(self.template, 'plain',
                                                ctx).strip()
        except BlockNotFound:
            h = HTML2Text()
            h.ignore_images = True
            h.ignore_emphasis = True
            h.ignore_tables = True
            self.plain = h.handle(self.html)
Example #8
def deconvolute_matrices(input_matrices, source_ps, dest_ps):
    '''Map (1536,384)-well input matrices to (384,96)-well output matrices '''

    factor = source_ps / dest_ps
    if factor != 4:
        raise ProgrammingError('deconvolute may only be used for '
                               'source_ps/dest_ps == 4: %d/%d' %
                               (source_ps, dest_ps))

    logger.debug(
        'deconvolute_matrices: convert %d input matrices to %d output',
        len(input_matrices),
        len(input_matrices) * factor)

    output_matrices = []

    for input_matrix in input_matrices:
        new_output_matrices = [
            create_blank_matrix(dest_ps) for i in range(0, 4)
        ]
        output_matrices += new_output_matrices
        for rownum, row in enumerate(input_matrix):
            for colnum, val in enumerate(row):
                output_quadrant = deconvolute_quadrant(source_ps, dest_ps,
                                                       rownum, colnum)
                output_row = deconvolute_row(source_ps, dest_ps, rownum,
                                             colnum)
                output_col = deconvolute_col(source_ps, dest_ps, rownum,
                                             colnum)

                new_output_matrices[output_quadrant][output_row][
                    output_col] = val

    return output_matrices
Example #9
    def execute_sql(self, result_type=MULTI):
        self.setup_query()
        if not result_type:
            result_type = NO_RESULTS
        try:
            is_special, result = self.special_cases(result_type)
            if is_special:
                return result

            pk, params = self.build_params_and_pk()
            url = get_resource_path(self.query.model, pk)
            response = self.connection.cursor().get(url, params=params)
            self.raise_on_response(url, params, response)

            json = response.json()
            meta = self.get_meta(json, response)
            if meta:
                # pagination and other metadata

                high_mark = self.query.high_mark
                page_to_stop = None if high_mark is None else (
                    high_mark // meta['per_page'])

                def next_from_query():
                    for i in range(meta['page'], page_to_stop
                                   or meta['total_pages']):
                        tmp_params = params.copy()
                        # + 1 because range includes the start and excludes the stop
                        tmp_params['page'] = i + 1
                        last_response = self.connection.cursor().get(
                            url, params=tmp_params)
                        yield last_response.json()

            else:
                next_from_query = None

        except EmptyResultSet:
            if result_type == MULTI:
                return iter([])
            else:
                return

        if result_type == CURSOR:
            # Caller wants the raw cursor back, but this backend cannot provide one.
            raise ProgrammingError(
                "returning a cursor for this database is not supported")
        if result_type == SINGLE:
            response_reader = ApiResponseReader(json, many=pk is None)
            for result in self.result_iter(response_reader):
                return result
            return
        if result_type == NO_RESULTS:
            return
        response_reader = ApiResponseReader(json,
                                            next_=next_from_query,
                                            many=pk is None)
        result = self.result_iter(response_reader)
        return result
Example #10
def convolute_col(source_ps, dest_ps, source_matrix_quadrant, col):
    '''Map (96,384)-well input col to (384,1536)-well output col '''

    factor = dest_ps / source_ps
    if factor != 4:
        raise ProgrammingError('convolute may only be used for '
                               'dest_ps/source_ps == 4: %d/%d' %
                               (dest_ps, source_ps))
    return col * factor / 2 + source_matrix_quadrant % (factor / 2)
Example #11
    def db(self):
        if self._db:
            return self._db

        if self._horizontal_key is None:
            raise ProgrammingError("Missing horizontal key field's filter")

        self._add_hints(horizontal_key=self._horizontal_key)
        return super(HorizontalQuerySet, self).db
Example #12
 def test_it_warns_if_a_field_isnt_available(self, mocker):
     """This is to allow for un-applied to migrations to not break running migrations."""
     ActionFactory(signed=True)
     mock_canonical_json = mocker.patch(
         "normandy.recipes.models.Action.canonical_json")
     mock_canonical_json.side_effect = ProgrammingError("error for testing")
     errors = checks.action_signatures_are_correct(None)
     assert len(errors) == 1
     assert errors[0].id == checks.WARNING_COULD_NOT_CHECK_SIGNATURES
Example #13
def deconvolute_quadrant(source_ps, dest_ps, row, col):
    '''Map (1536,384)-well row,col to (384,96)-well output quadrant '''

    factor = source_ps / dest_ps
    if factor != 4:
        raise ProgrammingError('Deconvolute may only be used for '
                               'source_ps/dest_ps == 4: %d/%d' %
                               (source_ps, dest_ps))

    return col % (factor / 2) + (row % (factor / 2)) * (factor / 2)
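A hedged sanity check of the formula above (helper name invented): with factor 4, the parity of the larger plate's row and column recovers the quadrant that the corresponding convolute call placed the value in.

# Hypothetical check, not from the original project.
def _deconvolute_quadrant(row, col, factor=4):
    half = factor // 2
    return col % half + (row % half) * half

# The four cells of one 2x2 block map back to quadrants 0, 1, 2, 3.
assert [_deconvolute_quadrant(r, c) for r in (0, 1) for c in (0, 1)] == [0, 1, 2, 3]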
Example #14
def deconvolute_col(source_ps, dest_ps, row, col):
    '''Map (1536,384)-well input col to (384,96)-well output col'''

    dest_matrix_number = deconvolute_quadrant(source_ps, dest_ps, row, col)
    factor = source_ps / dest_ps
    if factor != 4:
        raise ProgrammingError('Deconvolute may only be used for '
                               'source_ps/dest_ps == 4: %d/%d' %
                               (source_ps, dest_ps))
    return (col / (factor / 2) + col % (factor / 2)
            - dest_matrix_number % (factor / 2))
Example #15
def deconvolute_row(source_ps, dest_ps, row, col):
    '''Map (1536,384)-well input row to (384,96)-well output row '''

    dest_matrix_number = deconvolute_quadrant(source_ps, dest_ps, row, col)
    factor = source_ps / dest_ps
    if factor != 4:
        raise ProgrammingError('Deconvolute may only be used for '
                               'source_ps/dest_ps == 4: %d/%d' %
                               (source_ps, dest_ps))
    return (row / (factor / 2) + row % (factor / 2)
            - dest_matrix_number / (factor / 2))
Example #16
    def execute_sql(self, result_type=MULTI):
        if self.is_api_model():

            q = self.query
            # we don't care about many-to-many tables; the api will clean them up for us
            if not self.query.get_meta().auto_created:

                for id in self.resolve_ids():
                    result = self.connection.cursor().delete(
                        get_resource_path(q.model, pk=id), )
                    if result.status_code not in (200, 202, 204):
                        raise ProgrammingError("the deletion has failed : %s" %
                                               result.text)
Example #17
    def test_sync_with_tethys_db_exists_progamming_error(
            self, mock_te, mock_log):
        mock_warning = mock_log.warning
        ext = tethys_app_base.TethysExtensionBase()
        ext.root_url = 'test_url'
        mock_te.objects.filter().all.side_effect = ProgrammingError(
            'test_error')
        ext.sync_with_tethys_db()

        # Check_result
        mock_warning.assert_called_with(
            "Unable to sync extension with database. "
            "tethys_apps_tethysextension table does not exist")
Example #18
 def execute_sql(self, result_type=MULTI):
     updated = 0
     if self.is_api_model():
         q = self.query
         json = self.resolve_data()
         for id in self.resolve_ids():
             result = self.connection.cursor().patch(get_resource_path(
                 q.model, pk=id),
                                                     json=json)
             if result.status_code not in (200, 202, 204):
                 raise ProgrammingError("the update has failed : %s" %
                                        result.text)
             updated += 1
     return updated
Example #19
    def get_index(self, reading_hash):
        ''' Find the index for a counter "reading" '''

        if len(self.counter_hash) != len(reading_hash):
            raise ProgrammingError(
                'counter hash: %r does not match size of reading hash: %r' %
                (self.counter_hash.keys(), reading_hash.keys()))

        indexValue = 0
        for key, value in reading_hash.items():
            position = self.counter_hash.keys().index(key)
            if value not in self.counter_hash.values()[position]:
                raise ProgrammingError(
                    '%r:%r not in digits: %r' %
                    (key, value, self.counter_hash.values()[position]))
            positionValue = self.counter_hash.values()[position].index(value)
            for lesserPositionDigits in self.counter_hash.values()[position +
                                                                   1:]:
                positionValue *= len(lesserPositionDigits)
            logger.debug('key: %r:%r, positionValue: %d', key, value,
                         positionValue)
            indexValue += positionValue

        return indexValue
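A small worked example of the mixed-radix arithmetic above, under an assumed counter layout (the plate/quadrant digits are invented for illustration):

from collections import OrderedDict

# Hypothetical counter: most-significant digit first, as in get_index above.
digits = OrderedDict([('plate', [1, 2, 3]), ('quadrant', [0, 1, 2, 3])])
reading = {'plate': 2, 'quadrant': 3}

index = 0
keys = list(digits)
for position, key in enumerate(keys):
    place_value = digits[key].index(reading[key])
    for lesser_key in keys[position + 1:]:
        place_value *= len(digits[lesser_key])   # weight by the lower-order radix
    index += place_value
assert index == 7                                # plate '2' -> 1 * 4, quadrant 3 -> + 3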
Example #20
def convolute_wells(source_ps, dest_ps, wells):
    '''
    Map a list of wells from (96,384) plate format to (384,1536) plate format.
    '''
    convoluted_wells = []
    factor = dest_ps / source_ps
    if factor != 4:
        raise ProgrammingError('convolute may only be used for '
                               'dest_ps/source_ps == 4: %d/%d' %
                               (dest_ps, source_ps))
    for wellname in wells:
        convoluted_wells.extend(convolute_well(source_ps, dest_ps, wellname))
    logger.debug('wells convoluted: %r, %r, %r, %r', wells, source_ps, dest_ps,
                 convoluted_wells)
    return convoluted_wells
Example #21
    def order_by(self, ordering):
        if isinstance(ordering, bool):
            self.reverse_order = not ordering
            return

        for order in ordering:
            if isinstance(order, basestring):
                if order.startswith('-'):
                    field_name = order[1:]
                    ascending = False

                else:
                    field_name = order
                    ascending = True

            elif 2 == len(order):
                field, ascending = order
                field_name = field.name

            else:
                raise ProgrammingError(
                    'Invalid ordering specification: %s' % order, )

            order_string = ''.join(['' if ascending else '-', field_name])

            partition_key_filtered = True
            for partition_key in self.partition_columns:
                found = False
                for filter_tuple in self.filters:
                    field = filter_tuple[0]
                    if isinstance(field, ForeignKey):
                        partition_key = re.sub('_id$', '', partition_key)

                    if partition_key == field.name:
                        found = True
                        continue

                if not found:
                    partition_key_filtered = False
                    break

            if (partition_key_filtered
                    and field_name in self.clustering_columns
                    and not self.inefficient_ordering):
                self.ordering.append(order_string)

            else:
                self.add_inefficient_order_by(order_string)
Example #22
def convolute_well(source_ps, dest_ps, wellname):
    '''
    Map a single well from (96,384) plate format to its four wells in (384,1536) plate format.
    '''
    factor = dest_ps / source_ps
    if factor != 4:
        raise ProgrammingError('convolute may only be used for '
                               'dest_ps/source_ps == 4: %d/%d' %
                               (dest_ps, source_ps))
    convoluted_wells = []
    (row, col) = well_row_col(wellname)
    for quadrant in range(0, factor):
        new_row = convolute_row(source_ps, dest_ps, quadrant, row)
        new_col = convolute_col(source_ps, dest_ps, quadrant, col)
        convoluted_wells.append(get_well_name(new_row, new_col))
    return convoluted_wells
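An end-to-end illustration under an assumed A01-style naming scheme (well_row_col and get_well_name are not shown in this listing, so the name format is a guess): the 96-well plate's B02 expands into the 2x2 block C03, C04, D03, D04 of the 384-well plate.

import string

# Hypothetical naming helper (invented); the project's get_well_name may differ.
def _well_name(row, col):
    return '%s%02d' % (string.ascii_uppercase[row], col + 1)

row, col = 1, 1                                   # 96-well B02
names = [_well_name(row * 2 + q // 2, col * 2 + q % 2) for q in range(4)]
assert names == ['C03', 'C04', 'D03', 'D04']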
Example #23
    def _build_aliases(self):
        aliases = {}
        query = self.query
        alias_not_resolved = list(query.alias_map.values())
        current_fail = 0
        m2m_resolved = {}
        while alias_not_resolved and current_fail <= len(alias_not_resolved):
            table = alias_not_resolved.pop()
            """:type: django.db.models.sql.datastructures.Join | django.db.models.sql.datastructures.BaseTable"""

            if table.parent_alias is None:
                # this is the base table
                aliases[table.table_alias] = Alias(query.model, None, None,
                                                   None, None)
                current_fail = 0
                continue
            # table is current model repr, join_field is the field on the remote model that triggered the link
            # and so the related model is the current one
            current_model = table.join_field.related_model
            try:
                m2m_field = None
                parent_alias = aliases[table.parent_alias]

                if current_model._meta.auto_created:
                    m2m = find_m2m_field(table.join_field.field)
                    m2m_resolved[table.table_alias] = m2m, parent_alias
                    m2m_field = m2m

                # not an M2M relationship, but may be following a previous m2m
                if table.parent_alias in m2m_resolved:
                    field, parent = m2m_resolved[table.parent_alias]
                    # expand past the previous alias, which is useless on its own
                else:
                    field, parent = table.join_field, parent_alias
                aliases[table.table_alias] = Alias(current_model, parent,
                                                   field, field.name,
                                                   m2m_field)
                current_fail = 0
            except KeyError:
                # the parent table has not been resolved yet
                alias_not_resolved.insert(0, table)
                current_fail += 1
        if alias_not_resolved:
            raise ProgrammingError(
                "impossible to resolve table hierachy: %s" %
                [a.__dict__ for a in query.alias_map.values()])
        return aliases
Example #24
    def check_compatibility(self):
        """
        Check that the query does not use a feature that this backend does not provide.
        :param django.db.models.sql.query.Query query:
        :return: nothing
        :raise: NotSupportedError
        """
        query = self.query
        if query.group_by is not None:
            raise FakeDatabaseDbAPI2.NotSupportedError(
                'group by is not supported')
        if query.distinct and self.connection.settings_dict.get(
                'PREVENT_DISTINCT', True):
            raise FakeDatabaseDbAPI2.NotSupportedError(
                'distinct is not supported')
        # check where
        where_nodes = [query.where]
        while where_nodes:
            where = where_nodes.pop()
            is_and = where.connector == 'AND'
            is_negated = where.negated
            # AND xor negated
            is_simple_lookup = len(where.children) == 1

            exact_pk_value = extract_exact_pk_value(where)
            if exact_pk_value is not None:
                pass
            elif is_simple_lookup or (is_and and not is_negated):
                for child in where.children:
                    if isinstance(child, WhereNode):
                        where_nodes.append(child)
                    elif isinstance(child, Lookup):
                        if not child.rhs_is_direct_value():
                            raise FakeDatabaseDbAPI2.NotSupportedError(
                                "nested queryset is not supported")
                    elif isinstance(child, SubqueryConstraint):
                        raise FakeDatabaseDbAPI2.NotSupportedError(
                            "nested queryset is not supported")
                    else:  # pragma: no cover
                        raise ProgrammingError(
                            "unknown type for compiling the query : %s."
                            " expeced a Lookup or WhereNode" % child.__class__)
            else:
                reason = "NOT (.. AND ..)" if is_negated else "OR"
                raise FakeDatabaseDbAPI2.NotSupportedError(
                    "%s in queryset is not supported yet" % reason)
Example #25
 def raise_on_response(self, url, params, response):
     """
     Raise an exception with an explicit message if the response from the backend is not a 200, 202 or 204.
     :param url:
     :param params:
     :param response:
     :return:
     """
     if response.status_code == 404:
         raise EmptyResultSet()
     if response.status_code == 204:
         raise EmptyResultSet()
     elif response.status_code != 200:
         raise ProgrammingError(
             "the query to the api has failed : GET %s/%s \n=> %s" %
             (self.connection.connection.url, build_url(
                 url, params), message_from_response(response)))
Example #26
    def load_adverts(cls, catalog: str) -> None:
        """
        Loads data from files and saves to the database.

        :param catalog: Catalog name with files to be added.
        """

        path = os.path.join(catalog, "*.csv")
        files = glob.glob(path)

        if files:
            for file in files:
                adv = pd.read_csv(file)
                try:
                    for item in adv.values:
                        cls.create(item)
                except ProgrammingError:
                    raise ProgrammingError(
                        "You have to make migrations before add data to database."
                    )
        else:
            raise FileNotFoundError("No files to added.")
        logging.info("Data successfully updated.")
Example #27
def convolute_matrices(input_matrices, source_ps, dest_ps):
    '''Map (96,384)-well input matrices to (384,1536)-well output matrices '''

    factor = dest_ps / source_ps
    if factor != 4:
        raise ProgrammingError('convolute may only be used for '
                               'dest_ps/source_ps == 4: %d/%d' %
                               (dest_ps, source_ps))

    assert len(input_matrices) % 4 == 0, \
        'input_matrices count must be a multiple of 4'

    output_size = len(input_matrices) / 4

    logger.info('convolute_matrices: convert %d input matrices to %d output',
                len(input_matrices), output_size)

    output_matrices = []
    for i in range(0, output_size):
        output_matrix = create_blank_matrix(dest_ps)
        output_matrices.append(output_matrix)

        start = i * 4
        end = start + 4
        quadrant_matrices = input_matrices[start:end]
        # fill the matrix
        for quadrant, matrix in enumerate(quadrant_matrices):
            for rownum, row in enumerate(matrix):
                for colnum, val in enumerate(row):
                    dest_row = convolute_row(source_ps, dest_ps, quadrant,
                                             rownum)
                    dest_col = convolute_col(source_ps, dest_ps, quadrant,
                                             colnum)
                    output_matrix[dest_row][dest_col] = val

    return output_matrices
Example #28
def get_xls_response(data,
                     output_filename,
                     request=None,
                     image_keys=None,
                     title_function=None,
                     list_brackets=None):
    '''
    Create an xlsx file that will be streamed through the StreamingHttpResponse.
    
    - if length exceeds MAX_ROWS_PER_XLS_FILE, create multiple files and zip them.
    - TODO: when using xlsx, can simply add extra sheets to the file.
    
    @param output_filename - for naming temp files
 
    TODO: wrap cursor with cursorgenerator; pass in the image columns as arg
    TODO: rework this using the generic_xlsx_response as a template:
    - this method is used for all xlsx serialization at this time, except
    in testing and in ScreenResultSerializer - 20160419.
    '''

    if not isinstance(data, dict):
        raise ProgrammingError(
            'unknown data for xls serialization: %r, must be a dict of '
            'sheet_row entries' % type(data))

    # Create a temp dir to store multiple generated files
    temp_dir = os.path.join(settings.TEMP_FILE_DIR,
                            str(time.clock()).replace('.', '_'))
    os.mkdir(temp_dir)

    try:
        # Create a new Excel file and add a worksheet.
        filename = '%s.xlsx' % (output_filename)
        temp_file = os.path.join(temp_dir, filename)
        file_names_to_zip = [temp_file]

        if DEBUG_STREAMING:
            logger.info('temp file: %r', temp_file)

        workbook = xlsxwriter.Workbook(temp_file, {'constant_memory': True})

        for key, sheet_rows in data.items():

            # Determine if the source is a dict, a string, or an iterator

            if isinstance(sheet_rows, (dict, OrderedDict)):

                sheet_name = default_converter(key)
                logger.info('writing sheet %r...', sheet_name)
                sheet = workbook.add_worksheet(sheet_name)
                for i, row in enumerate(csvutils.dict_to_rows(sheet_rows)):
                    sheet.write_row(i, 0, row)

            elif isinstance(sheet_rows, basestring):
                sheet_name = default_converter(key)
                logger.info('writing single string sheet %r...', sheet_name)
                sheet = workbook.add_worksheet(sheet_name)
                sheet.write_string(0, 0, sheet_rows)

            else:
                # Sheet data is defined in an iterator of rows

                sheet_name = default_converter(key)
                logger.info('writing sheets for base name %r...', sheet_name)

                max_rows_per_sheet = 2**20
                sheet = workbook.add_worksheet(sheet_name)
                filerow = 0
                cumulative_filerows = 0
                sheets = 1

                for row, values in enumerate(sheet_rows):

                    if filerow == 0:
                        for i, (key, val) in enumerate(values.items()):
                            title = key
                            if title_function:
                                title = title_function(key)
                            sheet.write_string(filerow, i, title)
                        filerow += 1

                    for i, (key, val) in enumerate(values.items()):

                        val = csvutils.convert_list_vals(
                            val,
                            delimiter=LIST_DELIMITER_XLS,
                            list_brackets=list_brackets)

                        if val is not None:

                            if image_keys and key in image_keys:
                                max_rows_per_sheet = MAX_IMAGE_ROWS_PER_XLS_FILE
                                if not request:
                                    raise Exception(
                                        'must specify the request parameter for image export'
                                    )
                                # Hack to speed things up for the db.api:
                                if (key == 'structure_image'
                                        and 'library_well_type' in values and
                                        values['library_well_type'].lower()
                                        == 'empty'):
                                    continue
                                write_xls_image(sheet, filerow, i, val,
                                                request)
                            else:
                                if isinstance(val, numbers.Number):
                                    sheet.write_number(filerow, i, val)
                                else:
                                    if len(val) > 32767:
                                        logger.error(
                                            'warn, row too long, %d, key: %r, len: %d',
                                            filerow, key, len(val))
                                    sheet.write_string(filerow, i, val)
                    filerow += 1
                    if row % 10000 == 0:
                        logger.info('wrote %d rows to temp file', row)

                    if filerow >= max_rows_per_sheet:
                        cumulative_filerows += filerow
                        workbook.close()
                        logger.info('wrote file: %r', temp_file)

                        # Create a new Excel file and add a worksheet.
                        filename = '%s_%s.xlsx' % (output_filename,
                                                   cumulative_filerows)
                        temp_file = os.path.join(temp_dir, filename)
                        workbook = xlsxwriter.Workbook(
                            temp_file, {'constant_memory': True})
                        sheet = workbook.add_worksheet(sheet_name)
                        file_names_to_zip.append(temp_file)
                        filerow = 0

                logger.info('wrote %d filerows to file: %r', filerow,
                            temp_file)

        workbook.close()

        content_type = '%s; charset=utf-8' % XLSX_MIMETYPE
        if len(file_names_to_zip) > 1:
            content_type = '%s; charset=utf-8' % ZIP_MIMETYPE
            temp_file = os.path.join('/tmp', str(time.clock()))
            logger.info('temp ZIP file: %r', temp_file)

            with ZipFile(temp_file, 'w') as zip_file:
                for _file in file_names_to_zip:
                    zip_file.write(_file, os.path.basename(_file))
            logger.info('wrote file %r', temp_file)
            filename = '%s.zip' % output_filename

        _file = open(temp_file, 'rb')
        logger.info('download tmp file: %r, %r', temp_file, _file)
        wrapper = FileWrapper(_file)
        response = StreamingHttpResponse(wrapper, content_type=content_type)
        response['Content-Length'] = os.path.getsize(temp_file)
        response['Content-Disposition'] = \
            'attachment; filename=%s' % filename
        return response
    except Exception:
        logger.exception('xls streaming error')
        raise
Example #29
def transform(input_matrices, counter, aps, lps):

    assert aps in ALLOWED_MATRIX_SIZES, \
        ('assay_plate_size must be one of %r' % ALLOWED_MATRIX_SIZES)
    assert lps in ALLOWED_MATRIX_SIZES, \
        ('library_plate_size must be one of %r' % ALLOWED_MATRIX_SIZES)

    if aps < lps:
        logger.info('convolute matrices')
        factor = lps / aps
        if factor != 4:
            msg = (
                'Convolute: library_plate_size/assay_plate_size != 4: %d/%d' %
                (aps, lps))
            raise ValidationError({
                'assay_plate_size': msg,
                'library_plate_size': msg
            })
        if len(input_matrices) % 4 != 0:
            msg = 'Convolute: input matrix array must contain a multiple of 4 members'
            raise ValidationError({
                'assay_plate_size': msg,
                'library_plate_size': msg
            })
        # Create an adjusted counter to match the input:
        # - add quadrant counter to the right of plate counter
        new_counter_hash = OrderedDict()
        for key, value in counter.counter_hash.items():
            new_counter_hash[key] = value
            if key == 'plate':
                new_counter_hash['quadrant'] = [0, 1, 2, 3]
        counter96 = Counter(new_counter_hash)
        logger.info('counter96: %r', counter96)
        if counter96.size() != len(input_matrices):
            raise ProgrammingError(
                'input_matrices length (%d) must match '
                'the counter length with 4 quadrants: (%d)' %
                (len(input_matrices), counter96.size()))

        # - Create blank output matrices
        convoluted_matrices = [
            lims_utils.create_blank_matrix(lps)
            for x in range(0,
                           len(input_matrices) / 4)
        ]

        # Iterate through output (384) matrices and find the 96 matrix values
        # NOTE: could also start by iterating through input matrices
        for index, matrix in enumerate(convoluted_matrices):
            readout = counter.get_readout(index)
            for rownum, row in enumerate(matrix):
                for colnum in range(0, len(row)):
                    input_quadrant = lims_utils.deconvolute_quadrant(
                        lps, aps, rownum, colnum)
                    readout96 = dict(readout, quadrant=input_quadrant)
                    logger.debug(
                        'index: %d, 384 readout: %r, quadrant: %d, 96: %r',
                        index, readout, input_quadrant, readout96)
                    logger.debug('counter96: %r' % counter96.counter_hash)
                    input_index = counter96.get_index(readout96)
                    input_row = lims_utils.deconvolute_row(
                        lps, aps, rownum, colnum)
                    input_col = lims_utils.deconvolute_col(
                        lps, aps, rownum, colnum)
                    logger.debug('find: index: %d, cell: [%d][%d]',
                                 input_index, input_row, input_col)
                    row[colnum] = input_matrices[input_index][input_row][
                        input_col]

        return convoluted_matrices

    elif lps < aps:
        logger.info('deconvolute matrices')
        factor = aps / lps
        if factor != 4:
            msg = (
                'Deconvolute: assay_plate_size/library_plate_size != 4: %d/%d'
                % (aps, lps))
            raise ValidationError({
                'assay_plate_size': msg,
                'library_plate_size': msg
            })
        # Create an adjusted counter to match the input
        plates = counter.counter_hash.get('plate')
        logger.info('plates: %r', plates)
        if len(plates) % 4 != 0:
            msg = 'Deconvolute: plate count must be a multiple of 4: %d' % len(
                plates)
            raise ValidationError({'plate_ranges': msg})

        plates_1536 = OrderedDict()
        for i, plate in enumerate(plates):
            plate_number_1536 = i / 4
            if plate_number_1536 not in plates_1536:
                plates_1536[plate_number_1536] = []
            plates_1536[plate_number_1536].append(plate)
        logger.info('plates_1536: %r', plates_1536)
        new_counter_hash = counter.counter_hash.copy()
        new_counter_hash['plate'] = plates_1536.keys()
        counter1536 = Counter(new_counter_hash)

        # Create blank output matrices
        deconvoluted_matrices = [
            None for x in range(0,
                                len(input_matrices) * 4)
        ]
        # Iterate through input (1536) matrices and find the output 384 matrix value
        for index, matrix in enumerate(input_matrices):
            readout1536 = counter1536.get_readout(index)
            plate1536 = readout1536['plate']

            # Convert each 1536 plate separately, and find the output matrix position
            output_384_matrices = lims_utils.deconvolute_matrices([matrix],
                                                                  aps, lps)

            for quadrant, matrix384 in enumerate(output_384_matrices):
                plate384 = plates_1536[plate1536][quadrant]
                readout384 = dict(readout1536, plate=plate384)
                index384 = counter.get_index(readout384)

                deconvoluted_matrices[index384] = matrix384

        return deconvoluted_matrices

    else:
        return input_matrices
Example #30
 def all(cls):
     raise ProgrammingError(
         'relation "{}" does not exist\n'.format(error_table))