Example No. 1
def main(args):
    db = login.get_db()

    # get all the file names and their associated users
    s = """
    select f.name AS filename, f.created as filecreated, p.name as username, p.id as userid
    from ufiles f
    join people p on p.id = f.person_id
    """
    c = db.cursor()
    c.execute(s)
    rs = c.fetchall()
    desc = [d[0] for d in c.description]
    rows = []
    if len(rs) > 0:
        rows = [dict(zip(desc, row)) for row in rs]

    # print every file recorded in the database that cannot be opened on disk;
    # `path` is a base directory assumed to be defined at module level
    for row in rows:
        full_filename = (path + row['username'] + str(row['userid'])
                         + '/' + row['filename'])
        try:
            with open(full_filename):
                pass
        except IOError:
            print full_filename
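
A shorter way to express the same missing-file check is to test for existence with os.path instead of opening each file. The sketch below is hypothetical and keeps the Python 2 style of these examples; it assumes the same login module and that the base directory is passed in as path.

import os

import login


def report_missing_files(path):
    # list every database-registered file that is absent on disk
    c = login.get_db().cursor()
    c.execute("""
        SELECT f.name AS filename, p.name AS username, p.id AS userid
        FROM ufiles f
        JOIN people p ON p.id = f.person_id
    """)
    for filename, username, userid in c.fetchall():
        # mirrors <path><username><userid>/<filename> from the example above
        full_filename = os.path.join(path, username + str(userid), filename)
        if not os.path.exists(full_filename):
            print full_filename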
Example No. 2
def main(argv):
    db = login.get_db()
    c = db.cursor()
    c.execute("SELECT id from plantlines order by id")
    rs = c.fetchall()
    lims_line_ids = dict((row[0], 1) for row in rs)
    # `line_ids` is assumed to be built elsewhere in the original module;
    # print every id in it that has no matching plantlines entry in the LIMS
    for line_id in line_ids.keys():
        #if not line_ids.has_key(row[0]):
        #    print "%s has no lims line id!" % row[0]
        if not lims_line_ids.has_key(line_id):
            print line_id
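
The same consistency check reads more directly with sets, which also avoids the dict of placeholder 1s and the deprecated has_key. A minimal sketch, assuming line_ids is the collection of ids being validated (it is defined outside the snippet above).

import login


def report_ids_missing_from_lims(line_ids):
    c = login.get_db().cursor()
    c.execute("SELECT id FROM plantlines ORDER BY id")
    lims_line_ids = set(row[0] for row in c.fetchall())
    # ids we were given that the LIMS plantlines table does not contain
    for line_id in sorted(set(line_ids) - lims_line_ids):
        print line_id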
Example No. 3
def main(argv):
    db = login.get_db('trost_phenotyping')
    #    q = """
    #SELECT pe.entity_id, e.parameter_dt, e.PO_id, definition FROM project_entity pe
    #JOIN test_mpiscore_entities e ON e.OrganID = pe.entity_id
    #WHERE pe.project_id = 1
    #"""
    q = """
    SELECT OrganID,
           IF(char_length(parameter_dt) = 0, parameter, parameter_dt) AS parameter_dt,
           PO_id, definition, parameter
    FROM test_mpiscore_entities
    """
    db.query(q)
    rows = db.store_result().fetch_row(how=0, maxrows=0)
    for data in rows:
        stripped_data = []
        for d in data:
            if 'rstrip' in dir(d):  # string-like object, strip trailing line endings
                d = d.rstrip('\n').rstrip('\r')
            stripped_data.append(d)

        # _format_entry is a helper defined elsewhere in the original module
        data = _format_entry(stripped_data)
        print """
        INSERT INTO `entities` (id, name, PO, definition)
        VALUES (%s, %s, %s, %s)
        ON DUPLICATE KEY UPDATE
        name = VALUES(name),
        PO = VALUES(PO),
        definition = VALUES(definition);
        """ % (data[0], data[1], data[2], data[3])

        # insert into the internationalization table: en_us
        print """
        INSERT INTO `i18n` (locale, model, foreign_key, field, content)
        VALUES ('en_us', 'Entity', %s, 'name', %s)
        ON DUPLICATE KEY UPDATE
        locale = VALUES(locale),
        model = VALUES(model),
        foreign_key = VALUES(foreign_key),
        field = VALUES(field),
        content = VALUES(content);
        """ % (data[0], data[4])

        # insert into the internationalization table: de_de
        print """
        INSERT INTO `i18n` (locale, model, foreign_key, field, content)
        VALUES ('de_de', 'Entity', %s, 'name', %s)
        ON DUPLICATE KEY UPDATE
        locale = VALUES(locale),
        model = VALUES(model),
        foreign_key = VALUES(foreign_key),
        field = VALUES(field),
        content = VALUES(content);
        """ % (data[0], data[1])
Example No. 4
def main(argv):
    c = login.get_db().cursor()
    c.execute("select distinct plants.id from plants left join live_plants on live_plants.id = plants.id left join dead_plants on dead_plants.id = plants.id where live_plants.id is NULL and dead_plants.id is null")
    poss_aliquots = c.fetchall()

    c = login.get_db().cursor()
    c.execute("select id from aliquots")
    aliquots = c.fetchall()
    aliquots = dict( # make the resultset into a dictionary: aliquot_id => whatever
        zip(
            map(lambda x: x[0], aliquots), # the resultset aliquots are one element tuples, so get that one element
            xrange(
                len(aliquots) # give me a meaningless list
            )
        )
    )

    cnt = 0  # the number of poss_aliquots that are also aliquots
    for poss_aliquot in poss_aliquots:
        # fetchall() returns one-element tuples, so compare the bare id
        if aliquots.has_key(poss_aliquot[0]):
            cnt += 1

    print "%d/%d" % (cnt, len(poss_aliquots))
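
The same tally can be written with a set and plain membership tests, which sidesteps both the placeholder dict and the tuple-versus-id mismatch fixed above. A minimal sketch against the same schema.

import login


def count_plants_that_are_aliquots():
    c = login.get_db().cursor()
    c.execute("""
        SELECT DISTINCT plants.id FROM plants
        LEFT JOIN live_plants ON live_plants.id = plants.id
        LEFT JOIN dead_plants ON dead_plants.id = plants.id
        WHERE live_plants.id IS NULL AND dead_plants.id IS NULL
    """)
    poss_aliquots = [row[0] for row in c.fetchall()]

    c.execute("SELECT id FROM aliquots")
    aliquot_ids = set(row[0] for row in c.fetchall())

    cnt = sum(1 for plant_id in poss_aliquots if plant_id in aliquot_ids)
    print "%d/%d" % (cnt, len(poss_aliquots))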
Example No. 5
def main(argv):
    c = login.get_db().cursor()
    c.execute(
        "select distinct plants.id from plants left join live_plants on live_plants.id = plants.id left join dead_plants on dead_plants.id = plants.id where live_plants.id is NULL and dead_plants.id is null"
    )
    poss_aliquots = c.fetchall()

    c = login.get_db().cursor()
    c.execute("select id from aliquots")
    aliquots = c.fetchall()
    aliquots = dict(  # make the resultset into a dictionary: aliquot_id => whatever
        zip(
            # the resultset aliquots are one element tuples, so get that one element
            map(lambda x: x[0], aliquots),
            # give me a meaningless list
            xrange(len(aliquots))))

    cnt = 0  # the number of poss_aliquots that are also aliquots
    for poss_aliquot in poss_aliquots:
        # fetchall() returns one-element tuples, so compare the bare id
        if aliquots.has_key(poss_aliquot[0]):
            cnt += 1

    print "%d/%d" % (cnt, len(poss_aliquots))
Example No. 6
def main(argv):
    db = login.get_db('trost_phenotyping')
    #    q = """
    #SELECT value_id, attribut_D, wert_D from project_value pv
    #JOIN test_mpiscore_values v on v.ValueID = pv.value_id
    #WHERE pv.project_id = 1
    #"""

    q = """
SELECT ValueID, attribut_D, wert_D, attribut_E, `value` FROM test_mpiscore_values
"""
    db.query(q)
    rows = db.store_result().fetch_row(how=0, maxrows=0)
    for data in rows:
        stripped_data = []
        for d in data:
            if 'rstrip' in dir(d):  # string-like object, strip trailing line endings
                d = d.rstrip('\n').rstrip('\r')
            stripped_data.append(d)

        # _format_entry is a helper defined elsewhere in the original module
        data = _format_entry(stripped_data)
        rs = """ INSERT INTO `values` (id, attribute, `value`) VALUES (%s, %s, %s) ON DUPLICATE KEY UPDATE attribute = VALUES(attribute), `value` = VALUES(`value`);
        """ % (data[0], data[1], data[2])

        # there is a unique constraint on local,model,foreign_key,field
        # meaning that when that is found, only content should be updated
        rs += """
        INSERT INTO `i18n` (locale, model, foreign_key, field, content) VALUES ('en_us', 'Value', %s, 'attribute', %s) ON DUPLICATE KEY UPDATE content=VALUES(content);
        """ % (data[0], data[3])

        rs += """
        INSERT INTO `i18n` (locale, model, foreign_key, field, content) VALUES ('de_de', 'Value', %s, 'attribute', %s) ON DUPLICATE KEY UPDATE content=VALUES(content);
        """ % (data[0], data[1])

        rs += """
        INSERT INTO `i18n` (locale, model, foreign_key, field, content) VALUES ('en_us', 'Value', %s, 'value', %s) ON DUPLICATE KEY UPDATE content=VALUES(content);
        """ % (data[0], data[4])

        rs += """
        INSERT INTO `i18n` (locale, model, foreign_key, field, content) VALUES ('de_de', 'Value', %s, 'value', %s) ON DUPLICATE KEY UPDATE content=VALUES(content);
        """ % (data[0], data[2])

        print rs.encode('utf-8')
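
Both this script and the entities script above print interpolated SQL; if the statements were executed directly, DB-API placeholders would let the driver handle the quoting, and the four i18n upserts collapse into a loop. A minimal, hypothetical sketch of that approach using a standard cursor instead of the lower-level db.query.

def upsert_value_with_i18n(cursor, value_id, attr_de, value_de, attr_en, value_en):
    # placeholders are substituted and escaped by the driver itself
    cursor.execute(
        "INSERT INTO `values` (id, attribute, `value`) VALUES (%s, %s, %s) "
        "ON DUPLICATE KEY UPDATE attribute = VALUES(attribute), `value` = VALUES(`value`)",
        (value_id, attr_de, value_de))
    # one upsert per (locale, field, content) combination
    for locale, field, content in [('en_us', 'attribute', attr_en),
                                   ('de_de', 'attribute', attr_de),
                                   ('en_us', 'value', value_en),
                                   ('de_de', 'value', value_de)]:
        cursor.execute(
            "INSERT INTO `i18n` (locale, model, foreign_key, field, content) "
            "VALUES (%s, 'Value', %s, %s, %s) "
            "ON DUPLICATE KEY UPDATE content=VALUES(content)",
            (locale, value_id, field, content))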
Example No. 7
def main(argv):
    db = login.get_db('trost_phenotyping')
#    q = """
#SELECT value_id, attribut_D, wert_D from project_value pv
#JOIN test_mpiscore_values v on v.ValueID = pv.value_id
#WHERE pv.project_id = 1
#"""

    q = """
SELECT ValueID, attribut_D, wert_D, attribut_E, `value` FROM test_mpiscore_values
"""
    db.query(q)
    rows = db.store_result().fetch_row(how=0, maxrows=0)
    for data in rows:
        stripped_data = []
        for d in data:
            if 'rstrip' in dir(d):  # string-like object, strip trailing line endings
                d = d.rstrip('\n').rstrip('\r')
            stripped_data.append(d)

        # _format_entry is a helper defined elsewhere in the original module
        data = _format_entry(stripped_data)
        rs = """ INSERT INTO `values` (id, attribute, `value`) VALUES (%s, %s, %s) ON DUPLICATE KEY UPDATE attribute = VALUES(attribute), `value` = VALUES(`value`);
        """ % (data[0], data[1], data[2])

        # there is a unique constraint on local,model,foreign_key,field
        # meaning that when that is found, only content should be updated
        rs += """
        INSERT INTO `i18n` (locale, model, foreign_key, field, content) VALUES ('en_us', 'Value', %s, 'attribute', %s) ON DUPLICATE KEY UPDATE content=VALUES(content);
        """ % (data[0], data[3])

        rs += """
        INSERT INTO `i18n` (locale, model, foreign_key, field, content) VALUES ('de_de', 'Value', %s, 'attribute', %s) ON DUPLICATE KEY UPDATE content=VALUES(content);
        """ % (data[0], data[1])

        rs += """
        INSERT INTO `i18n` (locale, model, foreign_key, field, content) VALUES ('en_us', 'Value', %s, 'value', %s) ON DUPLICATE KEY UPDATE content=VALUES(content);
        """ % (data[0], data[4])

        rs += """
        INSERT INTO `i18n` (locale, model, foreign_key, field, content) VALUES ('de_de', 'Value', %s, 'value', %s) ON DUPLICATE KEY UPDATE content=VALUES(content);
        """ % (data[0], data[2])

        print rs.encode('utf-8')
Example No. 8
def main(args=None):
    """calls get_climate_data with arguments from the command line."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'input_file', type=argparse.FileType('r'),
        help=("tsv-file containing culture_id, flowering date, soil volume "
              "and field capacity"))
    parser.add_argument(
        'output_file', nargs='?', default=sys.stdout,
        type=argparse.FileType('w'),
        help=("tsv-file containing drought stress days (DSDs) (before/after "
              "flowering) OR control DSDs (before/after) and stress DSD "
              "(before/after), cold stress days (before/after flowering), "
              "heat stress days (before/after flowering) and light sum "
              "(before/after flowering). writes to STDOUT, if no filename "
              "is given."))
    args = parser.parse_args(args)  # None falls back to sys.argv[1:], honoring main(args=None)

    if not args.input_file:
        sys.exit(1)

    database = login.get_db()
    cursor = database.cursor()

    args.output_file.write(
        ('culture-id\tdrought-before\tdrought-after\tcontrol-drought-before'
         '\tcontrol-drought-after\tstress-drought-before\tstress-drought-after'
         '\tcold-before\tcold-after\theat-before\theat-after'
         '\tlight-before\tlight-after\n'))

    for i, line in enumerate(args.input_file, 1):
        try:
            climate_data = get_climate_data_from_str(cursor, line)
            args.output_file.write(format_climate_data(climate_data))
        except Exception as e:
            sys.stderr.write('line {} in file {} caused trouble: {}'.format(i, args.input_file.name, line))
            sys.stderr.write(traceback.format_exc())
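
None of these snippets show how main is invoked; the usual entry-point guard would be something like the following, assuming the code above lives in its own script.

if __name__ == '__main__':
    main()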
Example No. 9
#!/usr/bin/python

import os
import sys
import math

import login
the_db = login.get_db()

import data_objects as DO
import compile_data as CD1
import compile_data2 as CD2
import queries

import write_table as WT

CONTROLLED_TRIALS = [4537, 5506]
FIELD_TRIALS = [5544, 5541, 5546, 5540, 5542, 5543, 5539, 5545]
DETHLINGEN_TRIALS = [5519]


###
def main(argv):

    heat_d, h2o_d = CD2.get_climate_data()
    the_db.query(queries.golm_starch_query)
    data = the_db.store_result().fetch_row(how=1, maxrows=9999999)

    # debugging leftover: the early return makes the final line unreachable
    print data[0]
    return None
    data = [DO.StarchData(d.keys(), d.values()) for d in data]
Example No. 10
#!/usr/bin/python

import os
import sys
import math

import _mysql

import login

the_db = login.get_db()

import data_objects as DO

CONTROLLED_TRIALS = [4537, 5506]
FIELD_TRIALS = [5544, 5541, 5546, 5540, 5542, 5543, 5539, 5545]
DETHLINGEN_TRIALS = [5519]

DROUGHT_ID = 170
DETHLINGEN_DROUGHT_IDS = (170, 172)
CONTROL_IDS = (169, 171)


#
def group_by_cultivar(data):
    grouped = {}
    for dobj in data:
        key = dobj.cultivar.upper()
        grouped[key] = grouped.get(key, []) + [dobj]
    return grouped
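
group_by_cultivar rebuilds each list on every append; collections.defaultdict gives the same grouping without the copies. A minimal alternative sketch, assuming the data objects expose a cultivar attribute as above.

from collections import defaultdict


def group_by_cultivar(data):
    # map upper-cased cultivar name -> list of data objects with that cultivar
    grouped = defaultdict(list)
    for dobj in data:
        grouped[dobj.cultivar.upper()].append(dobj)
    return dict(grouped)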
Example No. 11
#!/usr/bin/python

import os
import sys
import csv
import time
import math

import openpyxl

import login
TROST_DB = login.get_db()

cast_d = {'s': str, 'n': float}  # presumably casts openpyxl cell type codes ('s' = string, 'n' = numeric)
"""
ERROR 1452 (23000): Cannot add or update a child row: a foreign key constraint fails (`trost_prod/phenotypes`, CONSTRAINT `fk_phenotypes_samples1` FOREIGN KEY (`sample_id`) REFERENCES `samples` (`id`) ON DELETE NO ACTION ON UPDATE NO ACTION)

"""
"""
LASTID in PHENOTYPES: 215937, 28436 rows in set (0.08 sec)

"""
"""
mysql> desc aliquots;
+-------------+--------------+------+-----+---------+----------------+
| Field       | Type         | Null | Key | Default | Extra          |
+-------------+--------------+------+-----+---------+----------------+
| id          | int(11)      | NO   | PRI | NULL    | auto_increment |
| aliquot     | int(11)      | YES  |     | NULL    |                |
| plantid     | int(11)      | YES  |     | NULL    |                |
| sample_date | date         | YES  |     | NULL    |                |