Example 1
def parse_csv_p(fp, **kw):
    """Parse a CSV file object, pivoting rows on one column and
    accumulating the selected result columns per pivot value."""
    metadata = {}
    pdata = {}
    header = False
    pivot_col = int(kw.get('pivot_col', 0))
    results_col = [
        int(i.strip()) for i in kw.get('results_col', '1').split(',')
    ]
    for line in fp.readlines():
        info = line.split(',')
        if len(info) < 2:
            raise Exception("Not enough fields in line:\n%s" % line.strip())
        pivot = info[pivot_col]
        if not header:
            # Treat the first line as the header row and record the
            # pivot/result column names in the metadata.
            metadata['pivot_name'] = pivot
            columns = [info[col] for col in results_col]
            metadata['column_names'] = ','.join(columns)
            header = True
            continue
        # Seed an accumulator for a new pivot value; otherwise fold this row in.
        if pivot not in pdata:
            pdata[pivot] = new_data(info, results_col)
        else:
            pdata[pivot] = add_data(pdata[pivot], info, results_col)
    return pdata, metadata
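The function leans on two helpers, new_data and add_data, that are defined elsewhere in the original module and are not shown here. A minimal usage sketch, assuming (hypothetically) that new_data seeds a list of floats from the selected result columns and add_data sums a new row into that list:

import io

# Hypothetical stand-ins for the helpers the example assumes; the real
# implementations live elsewhere in the original module.
def new_data(info, results_col):
    return [float(info[col]) for col in results_col]

def add_data(existing, info, results_col):
    return [prev + float(info[col])
            for prev, col in zip(existing, results_col)]

csv_text = (
    "host,requests\n"
    "web01,10\n"
    "web02,7\n"
    "web01,5\n"
)
pdata, metadata = parse_csv_p(io.StringIO(csv_text),
                              pivot_col=0, results_col='1')
print(metadata['pivot_name'])   # host
print(pdata)                    # {'web01': [15.0], 'web02': [7.0]}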
Example 2
import datetime
import time


def parse_csv_pg(fp, **kw):
    """Parse a CSV file object, pivoting rows on one column, grouping them
    on a second column, and accumulating the selected result columns."""
    metadata = {}
    pgdata = {}
    header = False
    if 'header' in kw:
        # An explicit header keyword overrides the first line of the file.
        info = kw['header'].split(',')
        metadata['pivot_name'] = info[0]
        metadata['grouping_name'] = info[1]
        metadata['column_names'] = ','.join(info[2:])
        header = True
    pivot_col = int(kw.get('pivot_col', 0))
    grouping_col = int(kw.get('grouping_col', 1))
    grouping_format = kw.get('grouping_format', False)
    results_col = [
        int(i.strip()) for i in kw.get('results_col', '2').split(',')
    ]
    for line in fp.readlines():
        info = line.split(',')
        if len(info) < 3:
            raise Exception("Not enough fields in line:\n%s" % line.strip())
        pivot = info[pivot_col]
        group = info[grouping_col]
        if not header:
            # Treat the first line as the header row and record the
            # pivot/grouping/result column names in the metadata.
            metadata['pivot_name'] = pivot
            metadata['grouping_name'] = group
            columns = [info[col] for col in results_col]
            metadata['column_names'] = ','.join(columns)
            header = True
            continue
        group_dict = pgdata.setdefault(pivot, {})
        if grouping_format:
            # Convert the raw grouping value (e.g. a date string) into a
            # datetime so groups compare and sort consistently.
            grouping_tuple = time.strptime(group, grouping_format)
            group = datetime.datetime(*grouping_tuple[:6])
        if group not in group_dict:
            group_dict[group] = new_data(info, results_col)
        else:
            group_dict[group] = add_data(group_dict[group], info, results_col)
    return pgdata, metadata
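As above, a minimal usage sketch; it reuses the hypothetical new_data and add_data stubs from the previous sketch and exercises the optional grouping_format keyword, which converts the grouping column into datetime objects:

import io

access_log = (
    "host,date,bytes\n"
    "web01,2008-04-13,100\n"
    "web01,2008-04-13,50\n"
    "web02,2008-04-14,75\n"
)
pgdata, metadata = parse_csv_pg(io.StringIO(access_log),
                                pivot_col=0, grouping_col=1,
                                results_col='2', grouping_format='%Y-%m-%d')
# pgdata == {'web01': {datetime.datetime(2008, 4, 13, 0, 0): [150.0]},
#            'web02': {datetime.datetime(2008, 4, 14, 0, 0): [75.0]}}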