# Example 1
def batch_modify_label(csv_id):
    """Apply a batch of row/column label changes to a CSV file.

    Expects a JSON body matching ``modify_label_list_schema`` with a
    ``changes`` list; each change carries an ``index``, a ``label`` and a
    ``context`` (row or column axis).  Returns the updated rows, columns
    and group levels serialized with ``CSVFileSchema``.

    :param csv_id: primary key of the ``CSVFile`` to modify (404 if absent).
    :raises: re-raises any commit failure after rolling back the session.
    """
    csv_file = CSVFile.query.get_or_404(csv_id)
    args = modify_label_list_schema.load(request.json or {})
    row_column_dump_schema = CSVFileSchema(
        only=['rows', 'columns', 'group_levels'])

    for change in args['changes']:
        index = change['index']
        label = change['label']
        context = change['context']

        if context == AXIS_NAME_TYPES.ROW:
            # Rows are keyed by the composite (csv_id, row_index).
            row = TableRow.query.get_or_404((csv_id, index))
            if label == TABLE_ROW_TYPES.INDEX:
                # Marking a row as the index also records it as the header row.
                csv_file.header_row_index = index
            row.row_type = label
            db.session.add(row)

        elif context == AXIS_NAME_TYPES.COLUMN:
            column = TableColumn.query.get_or_404((csv_id, index))
            if label == TABLE_COLUMN_TYPES.INDEX:
                csv_file.key_column_index = index
            elif label == TABLE_COLUMN_TYPES.GROUP:
                csv_file.group_column_index = index
            column.column_type = label
            db.session.add(column)

    db.session.add(csv_file)

    try:
        db.session.commit()
        return jsonify(row_column_dump_schema.dump(csv_file))
    except Exception:
        # Leave the session usable for the caller before propagating.
        db.session.rollback()
        raise
# Example 2
from viime.models import CSVFileSchema, db

# Module-level schema instance reused by the helpers below.
csv_file_schema = CSVFileSchema()


def generate_csv_file(data):
    """Create, persist and return a CSVFile model built from *data*."""
    loaded = csv_file_schema.load({
        'name': 'test_csv_file.csv',
        'table': data,
    })
    db.session.add(loaded)
    db.session.commit()
    return loaded


def test_no_header(app):
    """Clearing the header row index falls back to generated column names."""
    table = """
a,1,2,3
b,4,5,6
c,7,8,9
"""
    with app.test_request_context():
        csv = generate_csv_file(table)
        db.session.commit()

        csv.header_row_index = None
        db.session.commit()

        expected = ['col1', 'col2', 'col3', 'col4']
        assert csv.headers == expected
        # The first column acts as the index, so it is absent from the table.
        assert list(csv.indexed_table.columns) == expected[1:]

# Example 3
def _serialize_csv_file(csv_file):
    """Dump *csv_file* through a freshly constructed CSVFileSchema."""
    return CSVFileSchema().dump(csv_file)
# Example 4
def test_merge_files(client):
    """Merge two validated CSV files and verify the merged row/column types.

    Builds two CSV files sharing two sample rows (row1, row2), validates
    both, then POSTs to the merge endpoint and checks the exact column and
    row type metadata of the result.  Finally re-merges and asserts the
    output is unchanged.
    """
    csv_file_schema = CSVFileSchema()
    csv_file1 = csv_file_schema.load({
        'table': 'id,g,col1,col2\nrow1,g,0.5,2.0\nrow2,g,1.5,0\nrow3,h,3,-1.0\n',
        'name': 'test_csv_file1.csv'
    })
    csv_file2 = csv_file_schema.load({
        'table': 'id,g,col3,col4\nrow1,g,0.5,2.0\nrow2,g,1.5,0\nrow4,h,3,-1.0\n',
        'name': 'test_csv_file2.csv'
    })

    db.session.add_all([csv_file1, csv_file2])
    db.session.flush()

    # Both files must have validated tables before they can be merged.
    validated1 = ValidatedMetaboliteTable.create_from_csv_file(csv_file1)
    validated2 = ValidatedMetaboliteTable.create_from_csv_file(csv_file2)
    db.session.add_all([validated1, validated2])
    db.session.commit()

    resp = client.post(url_for('csv.merge_csv_files'),
                       json={
                           'name': 'merge',
                           'description': '',
                           'method': 'simple',
                           'datasets': [csv_file1.id, csv_file2.id]
                        })
    assert resp.status_code == 201

    # Merged layout: key column, the original group column, a second group
    # column (deduplicated as 'g_1'), then the measurement columns of both
    # source files.
    expected_column_types = [{
        'column_header': 'nan',
        'column_index': 0,
        'column_type': 'key',
        'subtype': None,
        'meta': None
    }, {
        'column_header': 'g',
        'column_index': 1,
        'column_type': 'group',
        'subtype': None,
        'meta': None
    }, {
        'column_header': 'g_1',
        'column_index': 2,
        'column_type': 'group',
        'subtype': None,
        'meta': None
    }, {
        'column_header': 'col1',
        'column_index': 3,
        'column_type': 'measurement',
        'subtype': None,
        'meta': None
    }, {
        'column_header': 'col2',
        'column_index': 4,
        'column_type': 'measurement',
        'subtype': None,
        'meta': None
    }, {
        'column_header': 'col3',
        'column_index': 5,
        'column_type': 'measurement',
        'subtype': None,
        'meta': None
    }, {
        'column_header': 'col4',
        'column_index': 6,
        'column_type': 'measurement',
        'subtype': None,
        'meta': None
    }]

    # Merged rows: header, an injected 'Data Source' metadata row that tags
    # each sample with its origin file, then only the samples common to both
    # inputs (row1, row2) — row3/row4 are dropped by the merge.
    expected_row_types = [{
        'row_index': 0,
        'row_name': 'nan',
        'row_type': 'header',
        'subtype': None,
        'meta': None
    }, {
        'row_index': 1,
        'row_name': 'Data Source',
        'row_type': 'metadata',
        'subtype': 'categorical',
        'meta': dict(levels=[dict(name='DS1', label='test_csv_file1.csv', color='#8dd3c7'),
                             dict(name='DS2', label='test_csv_file2.csv', color='#ffffb3')])
    }, {
        'row_index': 2,
        'row_name': 'row1',
        'row_type': 'sample',
        'subtype': None,
        'meta': None
    }, {
        'row_index': 3,
        'row_name': 'row2',
        'row_type': 'sample',
        'subtype': None,
        'meta': None
    }]

    assert resp.json['columns'] == expected_column_types
    assert resp.json['rows'] == expected_row_types

    # test remerge which shouldn't change a thing

    resp = client.post(url_for('csv.remerge_csv_file', csv_id=resp.json['id']))
    assert resp.status_code == 200
    assert resp.json['columns'] == expected_column_types
    assert resp.json['rows'] == expected_row_types