Example #1
import csv
import logging

# EntityData and the entitydata module are provided by the surrounding BioParkin project.
def read_integrated_data(filename, listOfEntities, dataSet=None):
    """
    DEPRECATED! Just for reminiscing in the past... :)

    Reads the results of a Fortran-BioParkin run.

    The list of SBML entities is used to associate the columns of the data file
    with their corresponding EntityData objects.

    format:
    C{time [WHITESPACE] par 1 [WHITESPACE] ... [WHITESPACE] par n}

    e.g.:
    C{0.0000000000000000E+00     1.4999999999999999E-02    0.0000000000000000E+00}

    """

    try:
        csv.register_dialect("SimulationData", delimiter=' ', quotechar='"', skipinitialspace=True)
        time = []
        values = {}
        with open(filename) as dataFile:
            reader = csv.reader(dataFile, dialect="SimulationData")
            for row in reader:
                for (j, value) in enumerate(row):
                    if j == 0:  # the 0th column always holds the time value
                        time.append(value)
                        continue

                    columnID = j - 1  # compensate for the time column offset
                    entity = listOfEntities[columnID]
                    if entity in values:
                        values[entity].append(value)  # append to existing list
                    else:
                        values[entity] = [value]  # create list for the first time

        # create final data structure
        dataMap = {}
        if values:  # only build the map if we actually read data
            for entity in values:
                entityData = EntityData()
                if dataSet:
                    entityData.setAssociatedDataSet(dataSet)
                entityData.dataDescriptors = time
                entityData.datapoints = values[entity]
                entityData.type = entitydata.TYPE_SIMULATED
                entityData.sbmlEntity = entity.wrappedEntity
                entityData.originalFilename = filename
                dataMap[entity] = entityData
            return dataMap

    except Exception as e:
        logging.error("Error while trying to read simulation data file: %s" % e)
Example #2
import csv
import logging

# EntityData and the entitydata module are provided by the surrounding BioParkin project.
def read_integrated_data(filename, listOfEntities, dataSet=None):
    '''
    Reads the results of a Fortran-BioParkin run.
    
    The list of SBML entities is used to associate the columns of the data file
    with their corresponding EntityData objects.
        
    format:
    C{time [WHITESPACE] par 1 [WHITESPACE] ... [WHITESPACE] par n}
    
    e.g.:
    C{0.0000000000000000E+00     1.4999999999999999E-02    0.0000000000000000E+00}
    
    '''

    try:
        csv.register_dialect("SimulationData",
                             delimiter=' ',
                             quotechar='"',
                             skipinitialspace=True)
        time = []
        values = {}
        with open(filename) as dataFile:
            reader = csv.reader(dataFile, dialect="SimulationData")
            for row in reader:
                for (j, value) in enumerate(row):
                    if j == 0:  # the 0th column always holds the time value
                        time.append(value)
                        continue

                    columnID = j - 1  # compensate for the time column offset
                    entity = listOfEntities[columnID]
                    if entity in values:
                        values[entity].append(value)  # append to existing list
                    else:
                        values[entity] = [value]  # create list for the first time

        # create final data structure
        dataMap = {}
        if values:  # only build the map if we actually read data
            for entity in values:
                entityData = EntityData()
                if dataSet:
                    entityData.setAssociatedDataSet(dataSet)
                entityData.dataDescriptors = time
                entityData.datapoints = values[entity]
                entityData.type = entitydata.TYPE_SIMULATED
                entityData.sbmlEntity = entity.wrappedEntity
                entityData.originalFilename = filename
                dataMap[entity] = entityData
            return dataMap

    except Exception as e:
        logging.error(
            "Error while trying to read simulation data file: %s" % e)


#def read_integrated_data_old(filename):
#    '''
#    OLD VERSION, DON'T USE!
#    Only for looking up old code!
#
#    Reads the results of a Fortran-BioParkin run.
#
#    input format, e.g. see:
#    /home/bzfwadem/workspace/BioParkin/compute/temp/P_gnrh_rec4_Solution.dat
#
#    format:
#    time \t par 1 \t par n
#
#    '''
#    data = {}
#
#    #with csv.reader(open(filename), delimiter=' ', quotechar='"') as reader:
#    try:
#        csv.register_dialect("SimulationData", delimiter=' ', quotechar='"', skipinitialspace=True)
#        reader = csv.reader(open(filename), dialect="SimulationData")
#        time = []
#        values = {}
#        for (i, row) in enumerate(reader):  # we don't use the i... yet
#
#            for (j, value) in enumerate(row):
#                if j == 0:  # jump over the 0th row
#                    time.append(value) # time value is always in the 0th row
#                    continue
#
#                columnID = j # possibility to compensate for column offset
#                # we will use the column index as the key
#                # TODO: We need something better than the column index. Column indices should be matched to previously loaded experimental data (or parts of the SBML model?)
#                if values.has_key(columnID):
#                    values[columnID].append(value) # append to existing list
#                else:
#                    values[columnID] = [value] # create list for the first time
#
#        # create final data structure
#        if len(values) > 0: # if we have data
#            for dataID in values.keys():
#                #if not data.has_key(dataID):
#                entry = {services.dataservice.TIME: time,
#                         services.dataservice.VALUE: values[dataID]}
#                data[str(dataID)] = entry   #str: because the experimental data's keys are strings
#                #else:
#                #    entry = data[dataID]
#            #                entry[Services.DataService.TIME] = time
#            #                entry[Services.DataService.VALUE].append(value)
#
#    except Exception, e:
#        logging.error("Error while trying to read simulation data file: %s" % e)
#    #    finally:
#    #        reader.close() # does not exist and does not seem to be necessary
#
#
#    #
#    #    dictReader = csv.DictReader(open(filename), ["time", range(nrCols)-1])
#    #    unformattedData = dictReader.
#
#    # TODO
#    #    for row in reader:
#    #        #time = float(row[0])
#    #        for (i, value) in row.enumerate_stripped():
#    #            if i == 0:
#    #                time = float(row[0])
#    #            else:
#
#    return data
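For contrast with the entity-keyed dataMap above, here is a hedged sketch of the index-keyed shape the commented-out old version produced; the plain "time"/"value" strings stand in for the services.dataservice.TIME / services.dataservice.VALUE constants and are assumptions, not the original API.

def read_by_column_index(lines):
    # Build {column index (as string): {"time": [...], "value": [...]}},
    # mirroring the old data[str(dataID)] = {TIME: ..., VALUE: ...} structure.
    time, values = [], {}
    for line in lines:
        fields = line.split()  # whitespace-separated, like the csv dialect above
        time.append(fields[0])
        for j, value in enumerate(fields[1:]):
            values.setdefault(j, []).append(value)
    return {str(j): {"time": time, "value": series} for j, series in values.items()}

rows = ["0.0E+00 1.5E-02 0.0E+00", "1.0E-01 1.3E-02 2.0E-03"]
print(read_by_column_index(rows)["0"])
# -> {'time': ['0.0E+00', '1.0E-01'], 'value': ['1.5E-02', '1.3E-02']}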