Code example #1
def main(argv):

    parser = argparse.ArgumentParser(description='')
    parser.add_argument('files', nargs='+')
    parser.add_argument('--standortid', type=int)
    parser.add_argument('--pages', default=1)
    args = parser.parse_args(argv)

    for fn in args.files:
        for page in range(int(args.pages)):
            data, headers = p_xls.read_xls_data(fn, page)

            data_to_keep = []
            for d in data:  # skip empty values
                amount = getattr(d, 'Regen_(mm)')
                if amount is None or amount == 0 or amount == '':
                    continue
                if args.standortid is not None:
                    setattr(d, 'StandortID', args.standortid)
                data_to_keep.append(d)

            sql.write_sql_table(data_to_keep,
                                columns_d,
                                table_name=TABLE_NAME,
                                add_id=True)

    return None
Code example #2
def main(argv):

    parser = argparse.ArgumentParser(description='')
    parser.add_argument('files', nargs='+')
    parser.add_argument('--pages', default=1)
    parser.add_argument('--standortid', type=int)
    args = parser.parse_args(argv)

    for fn in args.files:
        for page in range(int(args.pages)):
            data, headers = p_xls.read_xls_data(fn, page)

            data_to_keep = []
            for d in data:
                tmin = getattr(d, 'Tmin_(°C)')
                tmax = getattr(d, 'Tmax_(°C)')
                if tmin is not None and tmin != '' and tmax is not None and tmax != '':
                    data_to_keep.append(d)
                if args.standortid is not None:
                    setattr(d, 'StandortID', args.standortid)

            sql.write_sql_table(data_to_keep,
                                columns_d,
                                table_name=TABLE_NAME,
                                add_id=True)

    return None
Code example #3
def main(argv):

    if len(argv) == 0:
        sys.stderr.write(
            'Missing input file.\nUsage: python create_irrigationtable.py <dir>\n'
        )
        sys.exit(1)

    sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    for fn in argv:
        data, headers = p_xls.read_xls_data(fn)

        # find the right treatment columns: intersect two dicts
        treatment_column_names = [
            item for item in headers if item in extra_column_names
        ]

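        # one table write per treatment column: every row gets that column's
        # treatment_id and the column itself is exported as the generic 'value' field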
        for column in treatment_column_names:
            for dobj in data:
                dobj.treatment_id = sql.get_value_id(column.replace('_', ' '))
            columns_d_extra = columns_d.copy()
            columns_d_extra[column] = (3, 'value', float)
            sql.write_sql_table(data, columns_d_extra, table_name=TABLE_NAME)

    return None
Code example #4
File: import_qpcr.py (project: ingkebil/trost)
def main(args):
    fn = args[0] # get the filename

    # fill tables from the excel table
    print "set foreign_key_checks=0;"
    for page_nr in xrange(len(excel_pages)):
        page_name = excel_pages[ page_nr ]
        table_name = tablename_of[ page_name ]

        if (table_name is not None): # skip the pages we don't need
            if table_name == 'qpcr_pools':
                data, headers = process_xls.read_xls_data(fn, page_nr, include_time=True)
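                # trim the raw datetime strings: keep only the last 8 characters
                # of the time (HH:MM:SS) and the first 10 of the date (YYYY-MM-DD)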
                for row in data:
                    setattr(row, 'measurement_time', getattr(row, 'measurement_time')[-8:])
                    setattr(row, 'measurement_date', getattr(row, 'measurement_date')[:10])
            else:
                data, headers = process_xls.read_xls_data(fn, page_nr)

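            # for the primers sheet, import only the rows explicitly marked 'YES'
            # in the selected column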
            if table_name == 'qpcr_primers':
                data = [ row for row in data if getattr(row, 'selected') == 'YES' ]

            sql.write_sql_table(data,
                globals()[page_name], # get the array with the column based on the page name
                table_name=table_name)

    print "set foreign_key_checks=1;"
Code example #5
def main(args):
    fn = args[0]  # get the filename

    # fill tables from the excel table
    print "set foreign_key_checks=0;"
    for page_nr in xrange(len(excel_pages)):
        page_name = excel_pages[page_nr]
        table_name = tablename_of[page_name]

        if (table_name is not None):  # skip the pages we don't need
            if table_name == 'qpcr_pools':
                data, headers = process_xls.read_xls_data(fn,
                                                          page_nr,
                                                          include_time=True)
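                # trim the raw datetime strings: keep only the last 8 characters
                # of the time (HH:MM:SS) and the first 10 of the date (YYYY-MM-DD)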
                for row in data:
                    setattr(row, 'measurement_time',
                            getattr(row, 'measurement_time')[-8:])
                    setattr(row, 'measurement_date',
                            getattr(row, 'measurement_date')[:10])
            else:
                data, headers = process_xls.read_xls_data(fn, page_nr)

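            # for the primers sheet, import only the rows explicitly marked 'YES'
            # in the selected column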
            if table_name == 'qpcr_primers':
                data = [
                    row for row in data if getattr(row, 'selected') == 'YES'
                ]

            sql.write_sql_table(
                data,
                globals()[page_name],  # get the array with the column based on the page name
                table_name=table_name)

    print "set foreign_key_checks=1;"
Code example #6
File: import_climate_LWK.py (project: ingkebil/trost)
def main(argv):

    parser = argparse.ArgumentParser(description='')
    parser.add_argument('files', nargs='+')
    args = parser.parse_args(argv)
    
    for fn in args.files:
        for page in range(5): 
            data, headers  = p_xls.read_xls_data(fn, page)
            sql.write_sql_table(data, columns_d, table_name=TABLE_NAME, add_id=True)

    return None
Code example #7
def main(argv):

    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-c',
                        '--create_table',
                        action='store_true',
                        default=False,
                        dest='create_table')
    parser.add_argument('files', nargs='+')
    parser.add_argument('--pages', default=1)
    args = parser.parse_args(argv)

    if args.create_table:
        sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    for fn in args.files:
        for page in range(int(args.pages)):
            data, headers = p_xls.read_xls_data(fn, page)

            # find the right treatment columns: intersect two lists
            treatment_column_names = [
                item for item in headers if item in extra_column_names
            ]

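            # one insert pass per treatment column: rows without a value in that
            # column are skipped and the column is exported as the generic 'amount' field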
            for column in treatment_column_names:
                data_to_keep = []
                for dobj in data:
                    if not hasattr(dobj, column): continue
                    amount = getattr(dobj, column)
                    if amount is None or amount == 0 or amount == '': continue
                    dobj.treatment_id = sql.get_value_id(
                        column.replace('_', ' '))

                    if dobj.StandortID == 4537:  # auto fill the culture information for Golm
                        cur_date = datetime.strptime(dobj.Datum, '%Y-%m-%d')
                        if cur_date.year == 2011:
                            setattr(dobj, 'Culture', 46150)
                        elif cur_date.year == 2012:
                            setattr(dobj, 'Culture', 56877)
                        elif cur_date.year == 2013:
                            setattr(dobj, 'Culture', 62328)
                        else:
                            print "Date not in range: %s" % dobj.Datum

                    data_to_keep.append(dobj)

                columns_d_extra = columns_d.copy()
                columns_d_extra[column] = (3, 'amount', float)
                sql.write_sql_table(data_to_keep,
                                    columns_d_extra,
                                    table_name=TABLE_NAME,
                                    add_id=True)

    return None
Code example #8
def main(argv):
    
    if len(argv) == 0:
        sys.stderr.write('Missing input file.\nUsage: python create_subspeciestable.py <dir>\n')
        sys.exit(1)
    
    sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    dir_name = argv[0]
    fn = '%s/%s' % (dir_name, 'locations_with_geodata.xls')
    data, headers  = p_xls.read_xls_data(fn)
    sql.write_sql_table(data, columns_d, table_name=TABLE_NAME)   

    return None
Code example #9
def main(argv):

    parser = argparse.ArgumentParser(description='Process an xls table with location information')
    parser.add_argument('-c', '--create_table', action='store_true', dest='create_table', help='If set, creates a table definition as well', default=False)
    parser.add_argument('file')
    args = parser.parse_args(argv)

    if args.create_table:
        sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    data, headers  = p_xls.read_xls_data(args.file)
    sql.write_sql_table(data, columns_d, table_name=TABLE_NAME, insert=False)   

    return None
Code example #10
def main(argv):

    if len(argv) == 0:
        sys.stderr.write(
            'Missing input file.\nUsage: python create_subspeciestable.py <dir>\n'
        )
        sys.exit(1)

    sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    dir_name = argv[0]
    fn = '%s/%s' % (dir_name, 'locations_with_geodata.xls')
    data, headers = p_xls.read_xls_data(fn)
    sql.write_sql_table(data, columns_d, table_name=TABLE_NAME)

    return None
Code example #11
def main(argv):
    
    if len(argv) == 0:
        sys.stderr.write('Missing input file.\nUsage: python create_subspeciestable.py <dir>\n')
        sys.exit(1)
    
    sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    dir_name = argv[0]
    fn = '%s/%s' % (dir_name, 'TROSTSorten20120217.xls')
    data, headers  = p_xls.read_xls_data(fn)
    for dobj in data:
        dobj.species = DEFAULT_POTATO_ID
    sql.write_sql_table(data, columns_d, table_name=TABLE_NAME)   

    return None
Code example #12
def main(argv):

    parser = argparse.ArgumentParser(description='')
    parser.add_argument('files', nargs='+')
    args = parser.parse_args(argv)

    for fn in args.files:
        for page in range(1):
            data, headers = p_xls.read_xls_data(fn, page)
            sql.write_sql_table(data,
                                columns_d,
                                table_name=TABLE_NAME,
                                add_id=True)

    return None
Code example #13
def main(argv):
    
    if len(argv) == 0:
        sys.stderr.write('Missing input file.\nUsage: python create_trmttable.py <filename>\n')
        sys.exit(1)

    sql.write_sql_header(DB_NAME, TREATMENT_TABLE_NAME, TREATMENT_TABLE)
    sheet_index=p_xls.DEFAULT_TREATMENT_ALIQUOT_INDEX

    fn = argv[0]
    data, headers  = p_xls.read_xls_data(fn, sheet_index=sheet_index) 
    # return None
    sql.write_sql_table(data, columns_d, 
                        table_name=TREATMENT_TABLE_NAME)
    return None
Code example #14
File: create_trmttable.py (project: ingkebil/trost)
def main(argv):

    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--create_table', action='store_true', default=False, dest='create_table')
    parser.add_argument('file')
    args = parser.parse_args(argv)

    if args.create_table:
        sql.write_sql_header(DB_NAME, TREATMENT_TABLE_NAME, TREATMENT_TABLE)

    sheet_index=p_xls.DEFAULT_TREATMENT_ALIQUOT_INDEX

    data, headers  = p_xls.read_xls_data(args.file, sheet_index=sheet_index) 
    sql.write_sql_table(data, columns_d, table_name=TREATMENT_TABLE_NAME, add_id=True, insert=True)
    return None
Code example #15
File: create_starchtable.py (project: cschu/QSolanum)
def main(argv):
    
    if len(argv) == 0:
        sys.stderr.write('Missing input file.\nUsage: python create_starchtable.py <dir>\n')
        sys.exit(1)
    
    sql.write_sql_header(DB_NAME, YIELD_TABLE_NAME, YIELD_TABLE)
    sheet_index=p_xls.DEFAULT_PARCELLE_INDEX 
    dir_name = argv[0]
    for fn in glob.glob('%s/%s'% (dir_name, 'TROST_Knollenernte*.xls')):
        data, headers  = p_xls.read_xls_data(fn, sheet_index=sheet_index)
        data = annotate_locations(data)
        sql.write_sql_table(data, columns_d, table_name=YIELD_TABLE_NAME)
    

    return None
Code example #16
File: create_starchtable.py (project: ingkebil/trost)
def main(argv):

    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-c', '--create_table', action='store_true', dest='create_table', default=False)
    parser.add_argument('files', nargs='+')
    args = parser.parse_args(argv)
    
    if args.create_table:
        sql.write_sql_header(DB_NAME, YIELD_TABLE_NAME, YIELD_TABLE)
    sheet_index=p_xls.DEFAULT_PARCELLE_INDEX 
    for fn in args.files:
        data, headers  = p_xls.read_xls_data(fn, sheet_index=sheet_index)
        sql.write_sql_table(data, columns_d, table_name=YIELD_TABLE_NAME, insert=True, add_id=True)
    

    return None
Code example #17
def main(argv):

    parser = argparse.ArgumentParser(description='Process an xls table with the subspecies information')
    parser.add_argument('-c', '--create_table', action='store_true', dest='create_table', help='If set, creates a table definition as well', default=False)
    parser.add_argument('file')
    args = parser.parse_args(argv)

    if args.create_table:
        sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)

    data, headers  = p_xls.read_xls_data(args.file)
    for dobj in data:
        dobj.species = DEFAULT_POTATO_ID
    sql.write_sql_table(data, columns_d, table_name=TABLE_NAME, insert=False)   

    pass
Code example #18
def main(argv):

    if len(argv) == 0:
        sys.stderr.write(
            'Missing input file.\nUsage: python create_starchtable.py <dir>\n')
        sys.exit(1)

    sql.write_sql_header(DB_NAME, YIELD_TABLE_NAME, YIELD_TABLE)
    sheet_index = p_xls.DEFAULT_PARCELLE_INDEX
    dir_name = argv[0]
    for fn in glob.glob('%s/%s' % (dir_name, 'TROST_Knollenernte*.xls')):
        data, headers = p_xls.read_xls_data(fn, sheet_index=sheet_index)
        data = annotate_locations(data)
        sql.write_sql_table(data, columns_d, table_name=YIELD_TABLE_NAME)

    return None
Code example #19
def main(argv):

    if len(argv) == 0:
        sys.stderr.write(
            'Missing input file.\nUsage: python create_subspeciestable.py <dir>\n'
        )
        sys.exit(1)

    sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    dir_name = argv[0]
    fn = '%s/%s' % (dir_name, 'TROSTSorten20120217.xls')
    data, headers = p_xls.read_xls_data(fn)
    for dobj in data:
        dobj.species = DEFAULT_POTATO_ID
    sql.write_sql_table(data, columns_d, table_name=TABLE_NAME)

    return None
Code example #20
File: create_culturetable.py (project: cschu/QSolanum)
def main(argv):
    
    if len(argv) == 0:
        sys.stderr.write('Missing input file.\nUsage: python create_subspeciestable.py <dir>\n')
        sys.exit(1)
    
    sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    dir_name = argv[0]
    fn = '%s/%s' % (dir_name, 'culture_data.xls')
    data, headers  = p_xls.read_xls_data(fn)
    # return None
    for dobj in data:
        dobj.experiment_id = DEFAULT_EXPERIMENT_ID
        dobj.condition = ''
        dobj.created = DEFAULT_DATE_STR
    sql.write_sql_table(data, columns_d, table_name=TABLE_NAME)   

    return None
Code example #21
def main(argv):

    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-c', '--create_table', action='store_true', default=False, dest='create_table')
    parser.add_argument('files', nargs='+')
    parser.add_argument('--pages', default=1)
    args = parser.parse_args(argv)
    
    if args.create_table:
        sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    for fn in args.files:
        for page in range(int(args.pages)): 
            data, headers  = p_xls.read_xls_data(fn, page)

            # find the right treatment columns: intersect two lists 
            treatment_column_names = [item for item in headers if item in extra_column_names]

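            # one insert pass per treatment column: rows without a value in that
            # column are skipped and the column is exported as the generic 'amount' field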
            for column in treatment_column_names:
                data_to_keep = []
                for dobj in data:
                    if not hasattr(dobj, column): continue
                    amount = getattr(dobj, column)
                    if amount is None or amount == 0 or amount == '': continue
                    dobj.treatment_id = sql.get_value_id(column.replace('_', ' '))

                    if dobj.StandortID == 4537: # auto fill the culture information for Golm
                        cur_date = datetime.strptime(dobj.Datum, '%Y-%m-%d')
                        if cur_date.year == 2011:
                            setattr(dobj, 'Culture', 46150)
                        elif cur_date.year == 2012:
                            setattr(dobj, 'Culture', 56877)
                        elif cur_date.year == 2013:
                            setattr(dobj, 'Culture', 62328)
                        else:
                            print "Date not in range: %s" % dobj.Datum

                    data_to_keep.append(dobj)

                columns_d_extra = columns_d.copy()
                columns_d_extra[ column ] = (3, 'amount', float)
                sql.write_sql_table(data_to_keep, columns_d_extra, table_name=TABLE_NAME, add_id=True)

    return None
Code example #22
def main(argv):

    parser = argparse.ArgumentParser(
        description='Process an xls table with location information')
    parser.add_argument('-c',
                        '--create_table',
                        action='store_true',
                        dest='create_table',
                        help='If set, creates a table definition as well',
                        default=False)
    parser.add_argument('file')
    args = parser.parse_args(argv)

    if args.create_table:
        sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    data, headers = p_xls.read_xls_data(args.file)
    sql.write_sql_table(data, columns_d, table_name=TABLE_NAME, insert=False)

    return None
Code example #23
File: create_culturetable.py (project: cschu/QSolanum)
def main(argv):

    if len(argv) == 0:
        sys.stderr.write(
            'Missing input file.\nUsage: python create_subspeciestable.py <dir>\n'
        )
        sys.exit(1)

    sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    dir_name = argv[0]
    fn = '%s/%s' % (dir_name, 'culture_data.xls')
    data, headers = p_xls.read_xls_data(fn)
    # return None
    for dobj in data:
        dobj.experiment_id = DEFAULT_EXPERIMENT_ID
        dobj.condition = ''
        dobj.created = DEFAULT_DATE_STR
    sql.write_sql_table(data, columns_d, table_name=TABLE_NAME)

    return None
Code example #24
def main(argv):
    
    if len(argv) == 0:
        sys.stderr.write('Missing input file.\nUsage: python create_irrigationtable.py <dir>\n')
        sys.exit(1)
    
    sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    for fn in argv:
        data, headers  = p_xls.read_xls_data(fn)

        # find the right treatment columns: intersect two dicts
        treatment_column_names = [item for item in headers if item in extra_column_names]

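        # one table write per treatment column: every row gets that column's
        # treatment_id and the column itself is exported as the generic 'value' field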
        for column in treatment_column_names:
            for dobj in data:
                dobj.treatment_id = sql.get_value_id(column.replace('_', ' '))
            columns_d_extra = columns_d.copy()
            columns_d_extra[ column ] = (3, 'value', float)
            sql.write_sql_table(data, columns_d_extra, table_name=TABLE_NAME)   

    return None
Code example #25
def main(argv):

    parser = argparse.ArgumentParser(
        description='Process an xls table with the subspecies information')
    parser.add_argument('-c',
                        '--create_table',
                        action='store_true',
                        dest='create_table',
                        help='If set, creates a table definition as well',
                        default=False)
    parser.add_argument('file')
    args = parser.parse_args(argv)

    if args.create_table:
        sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)

    data, headers = p_xls.read_xls_data(args.file)
    for dobj in data:
        dobj.species = DEFAULT_POTATO_ID
    sql.write_sql_table(data, columns_d, table_name=TABLE_NAME, insert=False)

    pass
Code example #26
File: create_culturetable.py (project: ingkebil/trost)
def main(argv):

    parser = argparse.ArgumentParser(description='Process an xls table with culture information')
    parser.add_argument('-c', '--create_table', action='store_true', dest='create_table', help='If set, creates a table definition as well', default=False)
    parser.add_argument('-d', '--database-import', action='store_true', dest='database', help='If set, replaces from LIMS instead of xls', default=False)
    parser.add_argument('file')
    args = parser.parse_args(argv)
    
    if args.create_table:
        sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)

    if args.database:
        import ora_sql
        #data = [ dict((k.lower(), v) for k,v in d.iteritems()) for d in ora_sql.get_all_cultures() ]
        data = []
        header = []
        ora_sql.set_formatting(False)
        all_cultures = ora_sql.get_all_cultures()
        # create the header
        if len(all_cultures) > 0:
            for k in all_cultures[0].keys():
                header.append(k.lower())
        # prepare the data
        for row in all_cultures:
            data.append(DO.DataObject(header, row.values()))
        ora_sql.set_formatting(True)

        global columns_d
        # need to lowercase the columns_d keys because Oracle ignores my nice
        # naming scheme for the data keys, making them fail to match up
        columns_d = dict((k.lower(), v) for k, v in columns_d.iteritems())
    else:
        data, headers  = p_xls.read_xls_data(args.file)
        # return None
    for dobj in data:
        dobj.experiment_id = DEFAULT_EXPERIMENT_ID
        dobj.condition = ''
        dobj.created = DEFAULT_DATE_STR
    sql.write_sql_table(data, columns_d, table_name=TABLE_NAME, insert=False)   

    return None
Code example #27
File: create_trmttable.py (project: ingkebil/trost)
def main(argv):

    parser = argparse.ArgumentParser()
    parser.add_argument('-c',
                        '--create_table',
                        action='store_true',
                        default=False,
                        dest='create_table')
    parser.add_argument('file')
    args = parser.parse_args(argv)

    if args.create_table:
        sql.write_sql_header(DB_NAME, TREATMENT_TABLE_NAME, TREATMENT_TABLE)

    sheet_index = p_xls.DEFAULT_TREATMENT_ALIQUOT_INDEX

    data, headers = p_xls.read_xls_data(args.file, sheet_index=sheet_index)
    sql.write_sql_table(data,
                        columns_d,
                        table_name=TREATMENT_TABLE_NAME,
                        add_id=True,
                        insert=True)
    return None
Code example #28
def main(argv):

    parser = argparse.ArgumentParser(description='')
    parser.add_argument('files', nargs='+')
    parser.add_argument('--pages', default=1)
    parser.add_argument('--standortid', type=int)
    args = parser.parse_args(argv)

    for fn in args.files:
        for page in range(int(args.pages)): 
            data, headers  = p_xls.read_xls_data(fn, page)

            data_to_keep = []
            for d in data:
                tmin = getattr(d, 'Tmin_(°C)')
                tmax = getattr(d, 'Tmax_(°C)')
                if tmin is not None and tmin != '' and tmax is not None and tmax != '':
                    data_to_keep.append(d)
                if args.standortid is not None:
                    setattr(d, 'StandortID', args.standortid)

            sql.write_sql_table(data_to_keep, columns_d, table_name=TABLE_NAME, add_id=True)

    return None
Code example #29
def main(argv):

    parser = argparse.ArgumentParser(description='')
    parser.add_argument('files', nargs='+')
    parser.add_argument('--standortid', type=int)
    parser.add_argument('--pages', default=1)
    args = parser.parse_args(argv)

    for fn in args.files:
        for page in range(int(args.pages)): 
            data, headers  = p_xls.read_xls_data(fn, page)

            data_to_keep = []
            for d in data: # skip empty values
                amount = getattr(d, 'Regen_(mm)')
                if amount is None or amount == 0 or amount == '':
                    continue
                if args.standortid is not None:
                    setattr(d, 'StandortID', args.standortid)
                data_to_keep.append(d)

            sql.write_sql_table(data_to_keep, columns_d, table_name=TABLE_NAME, add_id=True)

    return None
Code example #30
def main(argv):

    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-c',
                        '--create_table',
                        action='store_true',
                        default=False,
                        dest='create_table')
    parser.add_argument('files', nargs='+')
    args = parser.parse_args(argv)

    if args.create_table:
        sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)

    # create the season boundary dates: each pair brackets one trial year
    # (2011-2013) and selects the scaling factor and default Culture id below
    d2011_04_21 = datetime.strptime('2011 04 21', '%Y %m %d')
    d2011_09_01 = datetime.strptime('2011 09 01', '%Y %m %d')
    d2012_04_17 = datetime.strptime('2012 04 17', '%Y %m %d')
    d2012_08_28 = datetime.strptime('2012 08 28', '%Y %m %d')
    d2013_04_22 = datetime.strptime('2013 04 22', '%Y %m %d')
    d2013_08_20 = datetime.strptime('2013 08 20', '%Y %m %d')

    for fn in args.files:
        data, headers = p_xls.read_xls_data(fn)

        # find the right treatment columns: intersect two lists
        treatment_column_names = [
            item for item in headers if item in extra_column_names
        ]

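        # one insert pass per treatment column: rows with no numeric value in that
        # column are skipped and the column is exported as the generic 'amount' field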
        for column in treatment_column_names:
            data_to_keep = []
            for dobj in data:
                # make sure we have the treatment column with a value
                if not hasattr(dobj, column): continue
                amount = getattr(dobj, column)
                if amount is None or amount == 0: continue
                try:
                    amount = float(amount)
                except ValueError:
                    continue

                # get the treatment id
                dobj.treatment_id = sql.get_value_id(column.replace('_', ' '))

                # * we need to recalculate the amount based on some rules
                # * add default culture according to year
                cur_date = datetime.strptime(dobj.Datum, '%Y-%m-%d')
                if d2011_04_21 <= cur_date <= d2011_09_01:
                    setattr(dobj, column, float(amount) * 0.7 * 4.4)
                    setattr(dobj, 'Culture', 44443)
                elif d2012_04_17 <= cur_date <= d2012_08_28:
                    setattr(dobj, column, float(amount) * 0.55 * 4.4)
                    setattr(dobj, 'Culture', 56726)
                elif d2013_04_22 <= cur_date <= d2013_08_20:
                    setattr(dobj, column, float(amount) * 0.7 * 4.4)
                    setattr(dobj, 'Culture', 62326)
                else:
                    print "Date not in range: %s" % dobj.Datum

                data_to_keep.append(dobj)
            columns_d_extra = columns_d.copy()
            columns_d_extra[column] = (3, 'amount', float)
            sql.write_sql_table(data_to_keep,
                                columns_d_extra,
                                table_name=TABLE_NAME,
                                add_id=True)

    return None
Code example #31
def main(argv):

    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-c', '--create_table', action='store_true', default=False, dest='create_table')
    parser.add_argument('files', nargs='+')
    args = parser.parse_args(argv)
    
    if args.create_table:
        sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)

    # create the season boundary dates: each pair brackets one trial year
    # (2011-2013) and selects the scaling factor and default Culture id below
    d2011_04_21 = datetime.strptime('2011 04 21', '%Y %m %d')
    d2011_09_01 = datetime.strptime('2011 09 01', '%Y %m %d')
    d2012_04_17 = datetime.strptime('2012 04 17', '%Y %m %d')
    d2012_08_28 = datetime.strptime('2012 08 28', '%Y %m %d')
    d2013_04_22 = datetime.strptime('2013 04 22', '%Y %m %d')
    d2013_08_20 = datetime.strptime('2013 08 20', '%Y %m %d')

    for fn in args.files:
        data, headers  = p_xls.read_xls_data(fn)

        # find the right treatment columns: intersect two lists 
        treatment_column_names = [item for item in headers if item in extra_column_names]

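        # one insert pass per treatment column: rows with no numeric value in that
        # column are skipped and the column is exported as the generic 'amount' field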
        for column in treatment_column_names:
            data_to_keep = []
            for dobj in data:
                # make sure we have the treatment column with a value
                if not hasattr(dobj, column): continue
                amount = getattr(dobj, column)
                if amount is None or amount == 0: continue
                try:
                    amount = float(amount)
                except ValueError:
                    continue

                # get the treatment id
                dobj.treatment_id = sql.get_value_id(column.replace('_', ' '))

                # * we need to recalculate the amount based on some rules
                # * add default culture according to year
                cur_date = datetime.strptime(dobj.Datum, '%Y-%m-%d')
                if d2011_04_21 <= cur_date <= d2011_09_01:
                    setattr(dobj, column, float(amount) * 0.7 * 4.4)
                    setattr(dobj, 'Culture', 44443)
                elif d2012_04_17 <= cur_date <= d2012_08_28:
                    setattr(dobj, column, float(amount) * 0.55 * 4.4)
                    setattr(dobj, 'Culture', 56726)
                elif d2013_04_22 <= cur_date <= d2013_08_20:
                    setattr(dobj, column, float(amount) * 0.7 * 4.4)
                    setattr(dobj, 'Culture', 62326)
                else:
                    print "Date not in range: %s" % dobj.Datum



                data_to_keep.append(dobj)
            columns_d_extra = columns_d.copy()
            columns_d_extra[ column ] = (3, 'amount', float)
            sql.write_sql_table(data_to_keep, columns_d_extra, table_name=TABLE_NAME, add_id=True)

    return None