def main(argv):
    """Convert irrigation xls files into SQL INSERT statements.

    For every treatment column found in a file, each row is tagged with
    the matching treatment id and written out with that column mapped to
    the generic 'value' slot.
    """
    if not argv:
        sys.stderr.write(
            'Missing input file.\nUsage: python create_irrigationtable.py <dir>\n')
        sys.exit(1)
    sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    for filename in argv:
        data, headers = p_xls.read_xls_data(filename)
        # treatment columns = headers that also appear in extra_column_names
        # (list intersection, header order preserved)
        treatment_columns = [header for header in headers
                             if header in extra_column_names]
        for column in treatment_columns:
            for record in data:
                record.treatment_id = sql.get_value_id(column.replace('_', ' '))
            extended_columns = columns_d.copy()
            extended_columns[column] = (3, 'value', float)
            sql.write_sql_table(data, extended_columns, table_name=TABLE_NAME)
    return None
def main(argv):
    """Emit SQL for treatment amounts found in the given xls files.

    Rows without a usable amount are dropped; for the Golm site
    (StandortID 4537) the culture id is auto-filled from the row's year.
    """
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-c', '--create_table', action='store_true',
                        default=False, dest='create_table')
    parser.add_argument('files', nargs='+')
    # BUG FIX: let argparse do the int conversion (rejects bad input early)
    parser.add_argument('--pages', type=int, default=1)
    args = parser.parse_args(argv)
    if args.create_table:
        sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    for fn in args.files:
        for page in range(args.pages):
            data, headers = p_xls.read_xls_data(fn, page)
            # find the right treatment columns: intersect two lists
            treatment_column_names = [item for item in headers
                                      if item in extra_column_names]
            for column in treatment_column_names:
                data_to_keep = []
                for dobj in data:
                    # make sure we have the treatment column with a value
                    if not hasattr(dobj, column):
                        continue
                    amount = getattr(dobj, column)
                    # BUG FIX: identity check for None; keep the 0/'' skips
                    if amount is None or amount == 0 or amount == '':
                        continue
                    dobj.treatment_id = sql.get_value_id(
                        column.replace('_', ' '))
                    if dobj.StandortID == 4537:
                        # auto fill the culture information for Golm
                        cur_date = datetime.strptime(dobj.Datum, '%Y-%m-%d')
                        if cur_date.year == 2011:
                            setattr(dobj, 'Culture', 46150)
                        elif cur_date.year == 2012:
                            setattr(dobj, 'Culture', 56877)
                        elif cur_date.year == 2013:
                            setattr(dobj, 'Culture', 62328)
                        else:
                            # BUG FIX: print as a function call (valid on
                            # both Python 2 and Python 3)
                            print("Date not in range: %s" % dobj.Datum)
                    data_to_keep.append(dobj)
                columns_d_extra = columns_d.copy()
                columns_d_extra[column] = (3, 'amount', float)
                sql.write_sql_table(data_to_keep, columns_d_extra,
                                    table_name=TABLE_NAME, add_id=True)
    return None
def main(argv):
    """Read the geodata locations sheet from <dir> and emit SQL for it."""
    if not argv:
        sys.stderr.write(
            'Missing input file.\nUsage: python create_subspeciestable.py <dir>\n')
        sys.exit(1)
    sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    xls_path = '%s/%s' % (argv[0], 'locations_with_geodata.xls')
    data, headers = p_xls.read_xls_data(xls_path)
    sql.write_sql_table(data, columns_d, table_name=TABLE_NAME)
    return None
def main(argv):
    """Process an xls table with location information and emit SQL rows."""
    parser = argparse.ArgumentParser(
        description='Process an xls table with location information')
    parser.add_argument('-c', '--create_table', action='store_true',
                        dest='create_table', default=False,
                        help='If set, creates a table definition as well')
    parser.add_argument('file')
    args = parser.parse_args(argv)
    if args.create_table:
        sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    data, headers = p_xls.read_xls_data(args.file)
    # insert=False: only the row data is emitted, no INSERT wrapper
    sql.write_sql_table(data, columns_d, table_name=TABLE_NAME, insert=False)
    return None
def main(argv):
    """Emit SQL for the treatment/aliquot sheet of the given xls file."""
    if not argv:
        sys.stderr.write(
            'Missing input file.\nUsage: python create_trmttable.py <filename>\n')
        sys.exit(1)
    sql.write_sql_header(DB_NAME, TREATMENT_TABLE_NAME, TREATMENT_TABLE)
    data, headers = p_xls.read_xls_data(
        argv[0], sheet_index=p_xls.DEFAULT_TREATMENT_ALIQUOT_INDEX)
    sql.write_sql_table(data, columns_d, table_name=TREATMENT_TABLE_NAME)
    return None
def main(argv):
    """Parse CLI options and emit SQL rows for the treatment/aliquot sheet."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--create_table', action='store_true',
                        default=False, dest='create_table')
    parser.add_argument('file')
    args = parser.parse_args(argv)
    if args.create_table:
        sql.write_sql_header(DB_NAME, TREATMENT_TABLE_NAME, TREATMENT_TABLE)
    data, headers = p_xls.read_xls_data(
        args.file, sheet_index=p_xls.DEFAULT_TREATMENT_ALIQUOT_INDEX)
    sql.write_sql_table(data, columns_d, table_name=TREATMENT_TABLE_NAME,
                        add_id=True, insert=True)
    return None
def main(argv):
    """Read the subspecies xls from <dir>, tag rows with the potato id, emit SQL."""
    if not argv:
        sys.stderr.write(
            'Missing input file.\nUsage: python create_subspeciestable.py <dir>\n')
        sys.exit(1)
    sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    xls_path = '%s/%s' % (argv[0], 'TROSTSorten20120217.xls')
    data, headers = p_xls.read_xls_data(xls_path)
    for record in data:
        # every row in this sheet describes a potato subspecies
        record.species = DEFAULT_POTATO_ID
    sql.write_sql_table(data, columns_d, table_name=TABLE_NAME)
    return None
def main(argv):
    """Emit SQL for the locations-with-geodata sheet found in <dir>."""
    if not argv:
        sys.stderr.write(
            'Missing input file.\nUsage: python create_subspeciestable.py <dir>\n')
        sys.exit(1)
    sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    data, headers = p_xls.read_xls_data(
        '%s/%s' % (argv[0], 'locations_with_geodata.xls'))
    sql.write_sql_table(data, columns_d, table_name=TABLE_NAME)
    return None
def main(argv):
    """Dump TROST aliquots from the LIMS, as CSV ('-c') or as SQL INSERTs."""
    aliquots = ora_sql.get_aliquots_trost()
    # field order shared by both output formats
    field_keys = ('ALIQUOT_ID', 'U_ALIQUOT_LINK_A', 'CREATED_ON',
                  'AMOUNT', 'U_I_AMOUNT', 'U_ORGAN')
    if len(argv) > 0 and argv[0] == '-c':
        print("aliquot_id,plant_id,sample_date,amount,unit,organ")
        for aliquot in aliquots:
            print(','.join([str(aliquot[key]) for key in field_keys]))
    else:
        sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
        insert_template = """
INSERT INTO aliquots (id, aliquot, plantid, sample_date, amount, amount_unit, organ) VALUES (NULL, %d, %s, %s, %s, %s, %s);
""".strip()
        for aliquot in aliquots:
            print(insert_template % tuple(aliquot[key] for key in field_keys))
    return None
def main(argv):
    """Emit SQL for every TROST_Knollenernte*.xls yield sheet found in <dir>."""
    if not argv:
        sys.stderr.write(
            'Missing input file.\nUsage: python create_starchtable.py <dir>\n')
        sys.exit(1)
    sql.write_sql_header(DB_NAME, YIELD_TABLE_NAME, YIELD_TABLE)
    pattern = '%s/%s' % (argv[0], 'TROST_Knollenernte*.xls')
    for xls_path in glob.glob(pattern):
        data, headers = p_xls.read_xls_data(
            xls_path, sheet_index=p_xls.DEFAULT_PARCELLE_INDEX)
        # resolve the location ids before writing the rows
        data = annotate_locations(data)
        sql.write_sql_table(data, columns_d, table_name=YIELD_TABLE_NAME)
    return None
def main(argv):
    """Process an xls table with the subspecies information and emit SQL."""
    parser = argparse.ArgumentParser(
        description='Process an xls table with the subspecies information')
    # BUG FIX: the long option was misspelled '--ceate_table'
    parser.add_argument('-c', '--create_table', action='store_true',
                        dest='create_table', default=False,
                        help='If set, creates a table definition as well')
    parser.add_argument('file')
    args = parser.parse_args(argv)
    if args.create_table:
        sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    data, headers = p_xls.read_xls_data(args.file)
    for dobj in data:
        # every row describes a potato subspecies
        dobj.species = DEFAULT_POTATO_ID
    sql.write_sql_table(data, columns_d, table_name=TABLE_NAME, insert=False)
    # consistency: return None like the other entry points (was 'pass')
    return None
def main(argv):
    """Emit SQL rows for the parcelle sheet of each given yield xls file."""
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-c', '--create_table', action='store_true',
                        dest='create_table', default=False)
    parser.add_argument('files', nargs='+')
    args = parser.parse_args(argv)
    if args.create_table:
        sql.write_sql_header(DB_NAME, YIELD_TABLE_NAME, YIELD_TABLE)
    for xls_path in args.files:
        data, headers = p_xls.read_xls_data(
            xls_path, sheet_index=p_xls.DEFAULT_PARCELLE_INDEX)
        sql.write_sql_table(data, columns_d, table_name=YIELD_TABLE_NAME,
                            insert=True, add_id=True)
    return None
def main(argv):
    """Process all TROST_Knollenernte*.xls files in <dir> into SQL rows."""
    if not argv:
        sys.stderr.write(
            'Missing input file.\nUsage: python create_starchtable.py <dir>\n')
        sys.exit(1)
    sql.write_sql_header(DB_NAME, YIELD_TABLE_NAME, YIELD_TABLE)
    for yield_file in glob.glob('%s/%s' % (argv[0], 'TROST_Knollenernte*.xls')):
        data, headers = p_xls.read_xls_data(
            yield_file, sheet_index=p_xls.DEFAULT_PARCELLE_INDEX)
        data = annotate_locations(data)
        sql.write_sql_table(data, columns_d, table_name=YIELD_TABLE_NAME)
    return None
def main(argv):
    """Load the TROST variety sheet from <dir> and emit subspecies SQL rows."""
    if not argv:
        sys.stderr.write(
            'Missing input file.\nUsage: python create_subspeciestable.py <dir>\n')
        sys.exit(1)
    sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    data, headers = p_xls.read_xls_data(
        '%s/%s' % (argv[0], 'TROSTSorten20120217.xls'))
    for dobj in data:
        # all varieties in this sheet are potatoes
        dobj.species = DEFAULT_POTATO_ID
    sql.write_sql_table(data, columns_d, table_name=TABLE_NAME)
    return None
def main(argv):
    """Read current_plants.xls from <dir> and emit plants2 INSERT statements.

    Some plants do not have a subspecies id - causing trouble further
    downstream.  Hence, a dummy row was inserted into the subspecies table:
        insert into subspecies values(NULL, -1, 1, 'UNKNOWN', NULL, NULL, NULL, NULL);

    Table writing logic is specific to this table, therefore it does not
    use sql.write_sql_table.
    """
    if len(argv) == 0:
        sys.stderr.write(
            'Missing input file.\nUsage: python create_plants2table.py <dir>\n')
        sys.exit(1)
    sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    dir_name = argv[0]
    fn = '%s/%s' % (dir_name, 'current_plants.xls')
    data, headers = p_xls.read_xls_data(fn)
    for dobj in data:
        dobj.created = DEFAULT_DATE_STR
        if dobj.Subspecies_Id == '':
            # map missing subspecies to the dummy 'UNKNOWN' row (id -1)
            dobj.Subspecies_Id = -1
    for row in sql.prepare_sql_table(data, columns_d):
        try:
            # Adds the required values for subspecies.limsid and
            # locations.limsid to the insert query.
            # TODO: culture-id!, possibly sample-id!
            entry = [x[2](x[3]) for x in row[1:3] + row[5:7]]
            entry += (int(row[3][3]), int(row[4][3]))
            entry = tuple(entry)
            sys.stdout.write('%s\n' % (sql.INSERT_PLANTS2_STR % entry))
        except Exception:
            # BUG FIX: was a bare 'except:' which also swallowed
            # SystemExit/KeyboardInterrupt; narrowed to Exception.
            sys.stderr.write('EXC: %s\n' % row)
            sys.exit(1)
    return None
def main(argv):
    """Read current_plants.xls from <dir> and emit plants2 INSERT statements.

    Some plants do not have a subspecies id - causing trouble further
    downstream.  Hence, a dummy row was inserted into the subspecies table:
        insert into subspecies values(NULL, -1, 1, 'UNKNOWN', NULL, NULL, NULL, NULL);

    Table writing logic is specific to this table, therefore it does not
    use sql.write_sql_table.
    """
    if len(argv) == 0:
        sys.stderr.write(
            'Missing input file.\nUsage: python create_plants2table.py <dir>\n')
        sys.exit(1)
    sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    dir_name = argv[0]
    fn = '%s/%s' % (dir_name, 'current_plants.xls')
    data, headers = p_xls.read_xls_data(fn)
    for dobj in data:
        dobj.created = DEFAULT_DATE_STR
        if dobj.Subspecies_Id == '':
            # map missing subspecies to the dummy 'UNKNOWN' row (id -1)
            dobj.Subspecies_Id = -1
    for row in sql.prepare_sql_table(data, columns_d):
        try:
            # Adds the required values for subspecies.limsid and
            # locations.limsid to the insert query.
            # TODO: culture-id!, possibly sample-id!
            entry = [x[2](x[3]) for x in row[1:3] + row[5:7]]
            entry += (int(row[3][3]), int(row[4][3]))
            entry = tuple(entry)
            sys.stdout.write('%s\n' % (sql.INSERT_PLANTS2_STR % entry))
        except Exception:
            # BUG FIX: was a bare 'except:' which also swallowed
            # SystemExit/KeyboardInterrupt; narrowed to Exception.
            sys.stderr.write('EXC: %s\n' % row)
            sys.exit(1)
    return None
def main(argv):
    """Read culture_data.xls from <dir>, fill in defaults, and emit SQL."""
    if not argv:
        sys.stderr.write(
            'Missing input file.\nUsage: python create_subspeciestable.py <dir>\n')
        sys.exit(1)
    sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    xls_path = '%s/%s' % (argv[0], 'culture_data.xls')
    data, headers = p_xls.read_xls_data(xls_path)
    for record in data:
        # all cultures belong to the default experiment; no condition recorded
        record.experiment_id = DEFAULT_EXPERIMENT_ID
        record.condition = ''
        record.created = DEFAULT_DATE_STR
    sql.write_sql_table(data, columns_d, table_name=TABLE_NAME)
    return None
def main(argv):
    """Emit SQL for treatment amounts found in the given xls files.

    Rows without a usable amount are dropped; for the Golm site
    (StandortID 4537) the culture id is auto-filled from the row's year.
    """
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-c', '--create_table', action='store_true',
                        default=False, dest='create_table')
    parser.add_argument('files', nargs='+')
    # BUG FIX: let argparse do the int conversion (rejects bad input early)
    parser.add_argument('--pages', type=int, default=1)
    args = parser.parse_args(argv)
    if args.create_table:
        sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    for fn in args.files:
        for page in range(args.pages):
            data, headers = p_xls.read_xls_data(fn, page)
            # find the right treatment columns: intersect two lists
            treatment_column_names = [item for item in headers
                                      if item in extra_column_names]
            for column in treatment_column_names:
                data_to_keep = []
                for dobj in data:
                    # make sure we have the treatment column with a value
                    if not hasattr(dobj, column):
                        continue
                    amount = getattr(dobj, column)
                    # BUG FIX: identity check for None; keep the 0/'' skips
                    if amount is None or amount == 0 or amount == '':
                        continue
                    dobj.treatment_id = sql.get_value_id(
                        column.replace('_', ' '))
                    if dobj.StandortID == 4537:
                        # auto fill the culture information for Golm
                        cur_date = datetime.strptime(dobj.Datum, '%Y-%m-%d')
                        if cur_date.year == 2011:
                            setattr(dobj, 'Culture', 46150)
                        elif cur_date.year == 2012:
                            setattr(dobj, 'Culture', 56877)
                        elif cur_date.year == 2013:
                            setattr(dobj, 'Culture', 62328)
                        else:
                            # BUG FIX: print as a function call (valid on
                            # both Python 2 and Python 3)
                            print("Date not in range: %s" % dobj.Datum)
                    data_to_keep.append(dobj)
                columns_d_extra = columns_d.copy()
                columns_d_extra[column] = (3, 'amount', float)
                sql.write_sql_table(data_to_keep, columns_d_extra,
                                    table_name=TABLE_NAME, add_id=True)
    return None
def main(argv):
    """Turn an xls table of location information into SQL rows."""
    parser = argparse.ArgumentParser(
        description='Process an xls table with location information')
    parser.add_argument('-c', '--create_table', action='store_true',
                        default=False, dest='create_table',
                        help='If set, creates a table definition as well')
    parser.add_argument('file')
    args = parser.parse_args(argv)
    if args.create_table:
        sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    data, headers = p_xls.read_xls_data(args.file)
    # insert=False: only the row data is emitted, no INSERT wrapper
    sql.write_sql_table(data, columns_d, table_name=TABLE_NAME, insert=False)
    return None
def main(argv):
    """Load culture_data.xls from <dir>, apply default fields, emit SQL."""
    if not argv:
        sys.stderr.write(
            'Missing input file.\nUsage: python create_subspeciestable.py <dir>\n')
        sys.exit(1)
    sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    data, headers = p_xls.read_xls_data(
        '%s/%s' % (argv[0], 'culture_data.xls'))
    for dobj in data:
        # attach the default experiment; condition is intentionally blank
        dobj.experiment_id = DEFAULT_EXPERIMENT_ID
        dobj.condition = ''
        dobj.created = DEFAULT_DATE_STR
    sql.write_sql_table(data, columns_d, table_name=TABLE_NAME)
    return None
def main(argv):
    """Emit irrigation SQL: one pass per treatment column per input file."""
    if not argv:
        sys.stderr.write(
            'Missing input file.\nUsage: python create_irrigationtable.py <dir>\n')
        sys.exit(1)
    sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    for xls_path in argv:
        data, headers = p_xls.read_xls_data(xls_path)
        # treatment columns = headers that also occur in extra_column_names
        for column in [h for h in headers if h in extra_column_names]:
            for dobj in data:
                dobj.treatment_id = sql.get_value_id(column.replace('_', ' '))
            columns_with_value = columns_d.copy()
            columns_with_value[column] = (3, 'value', float)
            sql.write_sql_table(data, columns_with_value, table_name=TABLE_NAME)
    return None
def main(argv):
    """Emit SQL for culture information, read from an xls file or the LIMS."""
    parser = argparse.ArgumentParser(
        description='Process an xls table with culture information')
    parser.add_argument('-c', '--create_table', action='store_true',
                        dest='create_table', default=False,
                        help='If set, creates a table definition as well')
    parser.add_argument('-d', '--database-import', action='store_true',
                        dest='database', default=False,
                        help='If set, replaces from LIMS instead of xls')
    parser.add_argument('file')
    args = parser.parse_args(argv)
    if args.create_table:
        sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    if args.database:
        import ora_sql
        ora_sql.set_formatting(False)
        all_cultures = ora_sql.get_all_cultures()
        # derive the header from the keys of the first result row
        header = []
        if len(all_cultures) > 0:
            header = [key.lower() for key in all_cultures[0].keys()]
        data = [DO.DataObject(header, row.values()) for row in all_cultures]
        ora_sql.set_formatting(True)
        # Oracle upper-cases the result keys, so they would no longer match
        # the keys in columns_d -- lowercase columns_d so both line up.
        global columns_d
        columns_d = dict((key.lower(), value)
                         for key, value in columns_d.items())
    else:
        data, headers = p_xls.read_xls_data(args.file)
    for dobj in data:
        dobj.experiment_id = DEFAULT_EXPERIMENT_ID
        dobj.condition = ''
        dobj.created = DEFAULT_DATE_STR
    sql.write_sql_table(data, columns_d, table_name=TABLE_NAME, insert=False)
    return None
def main(argv):
    """Process an xls table with the subspecies information and emit SQL."""
    parser = argparse.ArgumentParser(
        description='Process an xls table with the subspecies information')
    # BUG FIX: the long option was misspelled '--ceate_table'
    parser.add_argument('-c', '--create_table', action='store_true',
                        dest='create_table', default=False,
                        help='If set, creates a table definition as well')
    parser.add_argument('file')
    args = parser.parse_args(argv)
    if args.create_table:
        sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    data, headers = p_xls.read_xls_data(args.file)
    for dobj in data:
        # every row describes a potato subspecies
        dobj.species = DEFAULT_POTATO_ID
    sql.write_sql_table(data, columns_d, table_name=TABLE_NAME, insert=False)
    # consistency: return None like the other entry points (was 'pass')
    return None
def main(argv):
    """Write treatment/aliquot rows (with ids) for the given xls file."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--create_table', action='store_true',
                        default=False, dest='create_table')
    parser.add_argument('file')
    args = parser.parse_args(argv)
    if args.create_table:
        sql.write_sql_header(DB_NAME, TREATMENT_TABLE_NAME, TREATMENT_TABLE)
    sheet = p_xls.DEFAULT_TREATMENT_ALIQUOT_INDEX
    data, headers = p_xls.read_xls_data(args.file, sheet_index=sheet)
    sql.write_sql_table(data, columns_d, table_name=TREATMENT_TABLE_NAME,
                        add_id=True, insert=True)
    return None
def main(argv):
    """Export TROST aliquots from the LIMS: CSV with '-c', SQL otherwise."""
    aliquots = ora_sql.get_aliquots_trost()
    # the exported fields, in output order, for both formats
    keys = ('ALIQUOT_ID', 'U_ALIQUOT_LINK_A', 'CREATED_ON',
            'AMOUNT', 'U_I_AMOUNT', 'U_ORGAN')
    csv_mode = len(argv) > 0 and argv[0] == '-c'
    if csv_mode:
        print("aliquot_id,plant_id,sample_date,amount,unit,organ")
        for aliquot in aliquots:
            print(','.join([str(aliquot[k]) for k in keys]))
    else:
        sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
        template = """
INSERT INTO aliquots (id, aliquot, plantid, sample_date, amount, amount_unit, organ) VALUES (NULL, %d, %s, %s, %s, %s, %s);
""".strip()
        for aliquot in aliquots:
            print(template % tuple(aliquot[k] for k in keys))
    return None
def main(argv):
    """Convert irrigation xls files into SQL, rescaling amounts per season.

    Each row's raw amount is converted (factor depends on the trial year's
    setup) and the matching culture id for that season is attached; rows
    outside the three known seasons are kept but reported.
    """
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-c', '--create_table', action='store_true',
                        default=False, dest='create_table')
    parser.add_argument('files', nargs='+')
    args = parser.parse_args(argv)
    if args.create_table:
        sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    # season boundaries of the three TROST field trials
    d2011_04_21 = datetime.strptime('2011 04 21', '%Y %m %d')
    d2011_09_01 = datetime.strptime('2011 09 01', '%Y %m %d')
    d2012_04_17 = datetime.strptime('2012 04 17', '%Y %m %d')
    d2012_08_28 = datetime.strptime('2012 08 28', '%Y %m %d')
    d2013_04_22 = datetime.strptime('2013 04 22', '%Y %m %d')
    d2013_08_20 = datetime.strptime('2013 08 20', '%Y %m %d')
    for fn in args.files:
        data, headers = p_xls.read_xls_data(fn)
        # find the right treatment columns: intersect two lists
        treatment_column_names = [item for item in headers
                                  if item in extra_column_names]
        for column in treatment_column_names:
            data_to_keep = []
            for dobj in data:
                # make sure we have the treatment column with a value
                if not hasattr(dobj, column):
                    continue
                amount = getattr(dobj, column)
                # BUG FIX: identity comparison for None instead of '=='
                if amount is None or amount == 0:
                    continue
                try:
                    amount = float(amount)
                except ValueError:
                    continue
                # get the treatment id
                dobj.treatment_id = sql.get_value_id(column.replace('_', ' '))
                # * we need to recalculate the amount based on some rules
                # * add default culture according to year
                cur_date = datetime.strptime(dobj.Datum, '%Y-%m-%d')
                if d2011_04_21 <= cur_date <= d2011_09_01:
                    setattr(dobj, column, float(amount) * 0.7 * 4.4)
                    setattr(dobj, 'Culture', 44443)
                elif d2012_04_17 <= cur_date <= d2012_08_28:
                    setattr(dobj, column, float(amount) * 0.55 * 4.4)
                    setattr(dobj, 'Culture', 56726)
                elif d2013_04_22 <= cur_date <= d2013_08_20:
                    setattr(dobj, column, float(amount) * 0.7 * 4.4)
                    setattr(dobj, 'Culture', 62326)
                else:
                    # BUG FIX: print as a function call (valid on Py2 and Py3)
                    print("Date not in range: %s" % dobj.Datum)
                data_to_keep.append(dobj)
            columns_d_extra = columns_d.copy()
            columns_d_extra[column] = (3, 'amount', float)
            sql.write_sql_table(data_to_keep, columns_d_extra,
                                table_name=TABLE_NAME, add_id=True)
    return None
def main(argv):
    """Convert irrigation xls files into SQL, rescaling amounts per season.

    Each row's raw amount is converted (factor depends on the trial year's
    setup) and the matching culture id for that season is attached; rows
    outside the three known seasons are kept but reported.
    """
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-c', '--create_table', action='store_true',
                        default=False, dest='create_table')
    parser.add_argument('files', nargs='+')
    args = parser.parse_args(argv)
    if args.create_table:
        sql.write_sql_header(DB_NAME, TABLE_NAME, TABLE)
    # season boundaries of the three TROST field trials
    d2011_04_21 = datetime.strptime('2011 04 21', '%Y %m %d')
    d2011_09_01 = datetime.strptime('2011 09 01', '%Y %m %d')
    d2012_04_17 = datetime.strptime('2012 04 17', '%Y %m %d')
    d2012_08_28 = datetime.strptime('2012 08 28', '%Y %m %d')
    d2013_04_22 = datetime.strptime('2013 04 22', '%Y %m %d')
    d2013_08_20 = datetime.strptime('2013 08 20', '%Y %m %d')
    for fn in args.files:
        data, headers = p_xls.read_xls_data(fn)
        # find the right treatment columns: intersect two lists
        treatment_column_names = [item for item in headers
                                  if item in extra_column_names]
        for column in treatment_column_names:
            data_to_keep = []
            for dobj in data:
                # make sure we have the treatment column with a value
                if not hasattr(dobj, column):
                    continue
                amount = getattr(dobj, column)
                # BUG FIX: identity comparison for None instead of '=='
                if amount is None or amount == 0:
                    continue
                try:
                    amount = float(amount)
                except ValueError:
                    continue
                # get the treatment id
                dobj.treatment_id = sql.get_value_id(column.replace('_', ' '))
                # * we need to recalculate the amount based on some rules
                # * add default culture according to year
                cur_date = datetime.strptime(dobj.Datum, '%Y-%m-%d')
                if d2011_04_21 <= cur_date <= d2011_09_01:
                    setattr(dobj, column, float(amount) * 0.7 * 4.4)
                    setattr(dobj, 'Culture', 44443)
                elif d2012_04_17 <= cur_date <= d2012_08_28:
                    setattr(dobj, column, float(amount) * 0.55 * 4.4)
                    setattr(dobj, 'Culture', 56726)
                elif d2013_04_22 <= cur_date <= d2013_08_20:
                    setattr(dobj, column, float(amount) * 0.7 * 4.4)
                    setattr(dobj, 'Culture', 62326)
                else:
                    # BUG FIX: print as a function call (valid on Py2 and Py3)
                    print("Date not in range: %s" % dobj.Datum)
                data_to_keep.append(dobj)
            columns_d_extra = columns_d.copy()
            columns_d_extra[column] = (3, 'amount', float)
            sql.write_sql_table(data_to_keep, columns_d_extra,
                                table_name=TABLE_NAME, add_id=True)
    return None