예제 #1
0
def get_scenario_data(scenario_id, **kwargs):
    """
        Get all the datasets used in the specified scenario.

        Hidden datasets which the requesting user (kwargs['user_id'])
        may not read have their value, frequency, start_time and
        metadata scrubbed before being returned.

        @returns a list of Dataset objects, detached from the session
    """
    user_id = kwargs.get('user_id')

    scenario_data = DBSession.query(Dataset).filter(
        Dataset.dataset_id == ResourceScenario.dataset_id,
        ResourceScenario.scenario_id == scenario_id).options(
            joinedload_all('metadata')).distinct().all()

    for sd in scenario_data:
        if sd.hidden == 'Y':
            try:
                sd.check_read_permission(user_id)
            except Exception:
                # Permission denied: blank out the sensitive fields
                # rather than failing the whole request. (Narrowed from
                # a bare 'except:', which also swallowed SystemExit and
                # KeyboardInterrupt.)
                sd.value = None
                sd.frequency = None
                sd.start_time = None
                sd.metadata = []

    # Detach everything so callers receive plain, session-free objects.
    DBSession.expunge_all()

    log.info("Retrieved %s datasets", len(scenario_data))
    return scenario_data
예제 #2
0
def convert_dataset(dataset_id, to_unit, **kwargs):
    """Convert a whole dataset (specified by 'dataset_id') to a new unit
    ('to_unit'). Conversion ALWAYS creates a NEW dataset, so the function
    returns the dataset ID of the new dataset. If a dataset with the same
    hash already exists, its ID is returned instead of creating another.

    Raises HydraError if the dataset has no units, is a descriptor, or
    has an unrecognised data type.
    """

    ds_i = DBSession.query(Dataset).filter(
        Dataset.dataset_id == dataset_id).one()

    dataset_type = ds_i.data_type

    dsval = ds_i.get_val()
    old_unit = ds_i.data_units

    # Guard clause: nothing to convert from.
    if old_unit is None:
        raise HydraError('Dataset has no units.')

    if dataset_type == 'scalar':
        new_val = hydra_units.convert(float(dsval), old_unit, to_unit)
    elif dataset_type == 'array':
        dim = array_dim(dsval)
        vecdata = arr_to_vector(dsval)
        newvec = hydra_units.convert(vecdata, old_unit, to_unit)
        new_val = vector_to_arr(newvec, dim)
    elif dataset_type == 'timeseries':
        new_val = []
        for ts_time, ts_val in dsval.items():
            dim = array_dim(ts_val)
            vecdata = arr_to_vector(ts_val)
            newvec = hydra_units.convert(vecdata, old_unit, to_unit)
            newarr = vector_to_arr(newvec, dim)
            # BUGFIX: list.append takes one argument; store the
            # (timestamp, value) pair as a tuple.
            new_val.append((ts_time, newarr))
    elif dataset_type == 'descriptor':
        raise HydraError('Cannot convert descriptor.')
    else:
        # Previously an unknown type fell through with 'new_val'
        # unbound, raising a confusing NameError below.
        raise HydraError("Unknown dataset type '%s'" % dataset_type)

    new_dataset = Dataset()
    new_dataset.data_units = to_unit
    new_dataset.set_val(dataset_type, new_val)
    new_dataset.data_dimen = ds_i.data_dimen
    new_dataset.data_name = ds_i.data_name
    new_dataset.data_type = ds_i.data_type
    new_dataset.hidden = 'N'
    new_dataset.set_metadata(ds_i.get_metadata_as_dict())
    new_dataset.set_hash()

    # De-duplicate: reuse an identical dataset if one already exists.
    existing_ds = DBSession.query(Dataset).filter(
        Dataset.data_hash == new_dataset.data_hash).first()

    if existing_ds is not None:
        DBSession.expunge_all()
        return existing_ds.dataset_id

    DBSession.add(new_dataset)
    DBSession.flush()

    return new_dataset.dataset_id
예제 #3
0
def convert_dataset(dataset_id, to_unit, **kwargs):
    """Convert a whole dataset (specified by 'dataset_id') to a new unit
    ('to_unit'). Conversion ALWAYS creates a NEW dataset, so the function
    returns the dataset ID of the new dataset. If a dataset with the same
    hash already exists, its ID is returned instead of creating another.

    Raises HydraError if the dataset has no units, is a descriptor, or
    has an unrecognised data type.
    """

    ds_i = DBSession.query(Dataset).filter(
        Dataset.dataset_id == dataset_id).one()

    dataset_type = ds_i.data_type

    dsval = ds_i.get_val()
    old_unit = ds_i.data_units

    # Guard clause: nothing to convert from.
    if old_unit is None:
        raise HydraError('Dataset has no units.')

    if dataset_type == 'scalar':
        new_val = hydra_units.convert(float(dsval), old_unit, to_unit)
    elif dataset_type == 'array':
        dim = array_dim(dsval)
        vecdata = arr_to_vector(dsval)
        newvec = hydra_units.convert(vecdata, old_unit, to_unit)
        new_val = vector_to_arr(newvec, dim)
    elif dataset_type == 'timeseries':
        new_val = []
        for ts_time, ts_val in dsval.items():
            dim = array_dim(ts_val)
            vecdata = arr_to_vector(ts_val)
            newvec = hydra_units.convert(vecdata, old_unit, to_unit)
            newarr = vector_to_arr(newvec, dim)
            # BUGFIX: list.append takes one argument; store the
            # (timestamp, value) pair as a tuple.
            new_val.append((ts_time, newarr))
    elif dataset_type == 'descriptor':
        raise HydraError('Cannot convert descriptor.')
    else:
        # Previously an unknown type fell through with 'new_val'
        # unbound, raising a confusing NameError below.
        raise HydraError("Unknown dataset type '%s'" % dataset_type)

    new_dataset = Dataset()
    new_dataset.data_units = to_unit
    new_dataset.set_val(dataset_type, new_val)
    new_dataset.data_dimen = ds_i.data_dimen
    new_dataset.data_name = ds_i.data_name
    new_dataset.data_type = ds_i.data_type
    new_dataset.hidden = 'N'
    new_dataset.set_metadata(ds_i.get_metadata_as_dict())
    new_dataset.set_hash()

    # De-duplicate: reuse an identical dataset if one already exists.
    existing_ds = DBSession.query(Dataset).filter(
        Dataset.data_hash == new_dataset.data_hash).first()

    if existing_ds is not None:
        DBSession.expunge_all()
        return existing_ds.dataset_id

    DBSession.add(new_dataset)
    DBSession.flush()

    return new_dataset.dataset_id
예제 #4
0
def get_resource_data(ref_key, ref_id, scenario_id, type_id, **kwargs):
    """
        Get all the resource scenarios for a given resource
        in a given scenario. If type_id is specified, only
        return the resource scenarios for the attributes
        within the type.

        Hidden datasets the requesting user may not read have their
        value, frequency and start_time scrubbed before return.
    """

    user_id = kwargs.get('user_id')

    # This can be either a single ID or a list, so make them consistent
    if not isinstance(scenario_id, list):
        scenario_id = [scenario_id]

    resource_data_qry = DBSession.query(ResourceScenario).filter(
        ResourceScenario.dataset_id == Dataset.dataset_id,
        ResourceAttr.resource_attr_id == ResourceScenario.resource_attr_id,
        ResourceScenario.scenario_id.in_(scenario_id),
        ResourceAttr.ref_key == ref_key,
        or_(ResourceAttr.network_id == ref_id, ResourceAttr.node_id == ref_id,
            ResourceAttr.link_id == ref_id,
            ResourceAttr.group_id == ref_id)).distinct().options(
                joinedload('resourceattr')).options(
                    joinedload_all('dataset.metadata'))

    if type_id is not None:
        # Restrict to the attributes belonging to the requested type.
        attr_ids = []
        rs = DBSession.query(TypeAttr).filter(
            TypeAttr.type_id == type_id).all()
        for r in rs:
            attr_ids.append(r.attr_id)

        resource_data_qry = resource_data_qry.filter(
            ResourceAttr.attr_id.in_(attr_ids))

    resource_data = resource_data_qry.all()

    for rs in resource_data:
        # Values may be stored zlib-compressed; decompress if so and
        # leave uncompressed values untouched.
        try:
            rs.dataset.value = zlib.decompress(rs.dataset.value)
        except zlib.error:
            pass

        if rs.dataset.hidden == 'Y':
            try:
                rs.dataset.check_read_permission(user_id)
            except Exception:
                # Permission denied: blank out the sensitive fields.
                # (Narrowed from a bare 'except:', which also swallowed
                # SystemExit and KeyboardInterrupt.)
                rs.dataset.value = None
                rs.dataset.frequency = None
                rs.dataset.start_time = None

    DBSession.expunge_all()
    return resource_data
예제 #5
0
def get_resource_data(ref_key, ref_id, scenario_id, type_id, **kwargs):
    """
        Get all the resource scenarios for a given resource
        in a given scenario. If type_id is specified, only
        return the resource scenarios for the attributes
        within the type.

        Hidden datasets the requesting user may not read have their
        value, frequency and start_time scrubbed before return.
    """

    user_id = kwargs.get('user_id')

    # This can be either a single ID or a list, so make them consistent
    if not isinstance(scenario_id, list):
        scenario_id = [scenario_id]

    resource_data_qry = DBSession.query(ResourceScenario).filter(
        ResourceScenario.dataset_id == Dataset.dataset_id,
        ResourceAttr.resource_attr_id == ResourceScenario.resource_attr_id,
        ResourceScenario.scenario_id.in_(scenario_id),
        ResourceAttr.ref_key == ref_key,
        or_(
            ResourceAttr.network_id == ref_id,
            ResourceAttr.node_id == ref_id,
            ResourceAttr.link_id == ref_id,
            ResourceAttr.group_id == ref_id
        )).distinct().options(
            joinedload('resourceattr')).options(
                joinedload_all('dataset.metadata'))

    if type_id is not None:
        # Restrict to the attributes belonging to the requested type.
        attr_ids = []
        rs = DBSession.query(TypeAttr).filter(
            TypeAttr.type_id == type_id).all()
        for r in rs:
            attr_ids.append(r.attr_id)

        resource_data_qry = resource_data_qry.filter(
            ResourceAttr.attr_id.in_(attr_ids))

    resource_data = resource_data_qry.all()

    for rs in resource_data:
        # Values may be stored zlib-compressed; decompress if so and
        # leave uncompressed values untouched.
        try:
            rs.dataset.value = zlib.decompress(rs.dataset.value)
        except zlib.error:
            pass

        if rs.dataset.hidden == 'Y':
            try:
                rs.dataset.check_read_permission(user_id)
            except Exception:
                # Permission denied: blank out the sensitive fields.
                # (Narrowed from a bare 'except:'; also normalised the
                # 3/11/15-space indentation of the original.)
                rs.dataset.value = None
                rs.dataset.frequency = None
                rs.dataset.start_time = None

    DBSession.expunge_all()
    return resource_data
예제 #6
0
def get_scenario_data(scenario_id, **kwargs):
    """
        Get all the datasets used in the specified scenario.

        Hidden datasets which the requesting user (kwargs['user_id'])
        may not read have their value, frequency, start_time and
        metadata scrubbed before being returned.

        @returns a list of Dataset objects, detached from the session
    """
    user_id = kwargs.get('user_id')

    scenario_data = DBSession.query(Dataset).filter(
        Dataset.dataset_id == ResourceScenario.dataset_id,
        ResourceScenario.scenario_id == scenario_id).options(
            joinedload_all('metadata')).distinct().all()

    for sd in scenario_data:
        if sd.hidden == 'Y':
            try:
                sd.check_read_permission(user_id)
            except Exception:
                # Permission denied: blank out the sensitive fields.
                # (Narrowed from a bare 'except:'; also normalised the
                # mixed 3/4-space indentation of the original.)
                sd.value = None
                sd.frequency = None
                sd.start_time = None
                sd.metadata = []

    # Detach everything so callers receive plain, session-free objects.
    DBSession.expunge_all()

    log.info("Retrieved %s datasets", len(scenario_data))
    return scenario_data
예제 #7
0
def get_scenarios_data(networks, nodes, links, scenario_id, attr_id, type_id,
                       **kwargs):
    """
        Get all the resource scenarios for a given attribute and/or type
        in a given scenario.

        networks / nodes / links are optional ID lists; when present,
        rows matching ANY of them are returned. Hidden datasets the
        requesting user may not read are scrubbed before return.
    """

    user_id = kwargs.get('user_id')

    # This can be either a single ID or a list, so make them consistent
    if not isinstance(scenario_id, list):
        scenario_id = [scenario_id]

    scenarios = DBSession.query(Scenario).filter(
        Scenario.scenario_id.in_(scenario_id)).all()
    for scenario in scenarios:
        resource_data_qry = DBSession.query(ResourceScenario).filter(
            ResourceScenario.dataset_id == Dataset.dataset_id,
            ResourceAttr.resource_attr_id == ResourceScenario.resource_attr_id,
            ResourceScenario.scenario_id == scenario.scenario_id) \
            .distinct() \
            .options(joinedload('resourceattr')) \
            .options(joinedload_all('dataset.metadata'))

        if attr_id:
            resource_data_qry = resource_data_qry.filter(
                ResourceAttr.attr_id.in_(set(attr_id)))

        # BUGFIX: the original mixed independent 'if's with an 'elif'
        # chain, so when several of networks/nodes/links were supplied,
        # multiple or_() filters were AND-ed together and rows matching
        # only one resource kind were wrongly excluded (e.g. a
        # network-only attribute failed the or_(node, link) filter).
        # Build a single or_() over whichever lists are present instead.
        ref_conditions = []
        if networks:
            ref_conditions.append(ResourceAttr.network_id.in_(set(networks)))
        if nodes:
            ref_conditions.append(ResourceAttr.node_id.in_(set(nodes)))
        if links:
            ref_conditions.append(ResourceAttr.link_id.in_(set(links)))
        if ref_conditions:
            resource_data_qry = resource_data_qry.filter(
                or_(*ref_conditions))

        resource_data = resource_data_qry.all()

        for rs in resource_data:
            # Values may be stored zlib-compressed; decompress if so
            # and leave uncompressed values untouched.
            try:
                rs.dataset.value = zlib.decompress(rs.dataset.value)
            except zlib.error:
                pass

            if rs.dataset.hidden == 'Y':
                try:
                    rs.dataset.check_read_permission(user_id)
                except Exception:
                    # Permission denied: blank out the sensitive fields.
                    # (Narrowed from a bare 'except:'.)
                    rs.dataset.value = None
                    rs.dataset.frequency = None
                    rs.dataset.start_time = None
        scenario.resourcescenarios = resource_data
        scenario.resourcegroupitems = []
    DBSession.expunge_all()
    return scenarios