Пример #1
0
def post_dataset(info_role):
    """
    Create or update a dataset from the JSON request body.

    :param info_role: user permission object (value_filter, id_role, code_action)
    :raises InsufficientRightsError: when the user's scope is "0" (no right)
    :returns: the persisted dataset as a dict (with relationships)
    """
    if info_role.value_filter == "0":
        raise InsufficientRightsError(
            # bug fix: message said "dataser" instead of "dataset"
            ('User "{}" cannot "{}" a dataset').format(
                info_role.id_role, info_role.code_action
            ),
            403,
        )

    data = dict(request.get_json())
    cor_dataset_actor = data.pop("cor_dataset_actor")

    dataset = TDatasets(**data)

    for cor in cor_dataset_actor:
        # remove id_cda if None, otherwise merge does not work well
        if "id_cda" in cor and cor["id_cda"] is None:
            cor.pop("id_cda")
        dataset.cor_dataset_actor.append(CorDatasetActor(**cor))

    # merge when the primary key is provided (update), add otherwise (create)
    if dataset.id_dataset:
        DB.session.merge(dataset)
    else:
        DB.session.add(dataset)
    DB.session.commit()
    return dataset.as_dict(True)
Пример #2
0
def post_dataset(info_role):
    """
    Post a dataset

    .. :quickref: Metadata;
    """
    # users with scope "0" may not create or update datasets at all
    if info_role.value_filter == "0":
        message = ('User "{}" cannot "{}" a dataset').format(
            info_role.id_role, info_role.code_action
        )
        raise InsufficientRightsError(message, 403)

    payload = dict(request.get_json())
    actors = payload.pop("cor_dataset_actor")
    module_ids = payload.pop("modules")

    dataset = TDatasets(**payload)
    for actor in actors:
        # drop a None id_cda so that session.merge behaves correctly
        if "id_cda" in actor and actor["id_cda"] is None:
            actor.pop("id_cda")
        dataset.cor_dataset_actor.append(CorDatasetActor(**actor))

    # attach the TModules objects matching the posted module ids
    dataset.modules = (
        DB.session.query(TModules)
        .filter(TModules.id_module.in_(module_ids))
        .all()
    )
    # update when the primary key is present, insert otherwise
    if dataset.id_dataset:
        DB.session.merge(dataset)
    else:
        DB.session.add(dataset)
    DB.session.commit()
    return dataset.as_dict(True)
Пример #3
0
def post_dataset(info_role):
    """Create or update a dataset from the JSON request body."""
    # scope "0" means no right at all for this action
    if info_role.value_filter == "0":
        raise InsufficientRightsError(
            ('User "{}" cannot "{}" a dataset').format(
                info_role.id_role, info_role.code_action
            ),
            403,
        )

    payload = dict(request.get_json())
    actor_rows = payload.pop("cor_dataset_actor")

    dataset = TDatasets(**payload)
    for actor_row in actor_rows:
        # a None id_cda makes session.merge misbehave, so drop it
        if "id_cda" in actor_row and actor_row["id_cda"] is None:
            actor_row.pop("id_cda")
        dataset.cor_dataset_actor.append(CorDatasetActor(**actor_row))

    # merge = update an existing row, add = insert a new one
    if dataset.id_dataset:
        DB.session.merge(dataset)
    else:
        DB.session.add(dataset)
    DB.session.commit()
    return dataset.as_dict(True)
Пример #4
0
def post_dataset(info_role):
    """
    Post a dataset

    .. :quickref: Metadata;
    """
    payload = dict(request.get_json())
    actors = payload.pop("cor_dataset_actor")
    module_ids = payload.pop("modules")

    dataset = TDatasets(**payload)
    for actor in actors:
        # a None id_cda breaks session.merge, so drop the key
        if "id_cda" in actor and actor.get("id_cda") is None:
            actor.pop("id_cda")
        dataset.cor_dataset_actor.append(CorDatasetActor(**actor))

    # attach the modules relationship from the posted module ids
    dataset.modules = (
        DB.session.query(TModules)
        .filter(TModules.id_module.in_(module_ids))
        .all()
    )
    if dataset.id_dataset:
        DB.session.merge(dataset)
    else:
        # on creation only, record the current user as digitizer
        dataset.id_digitizer = info_role.id_role
        DB.session.add(dataset)
    DB.session.commit()
    return dataset.as_dict(True)
Пример #5
0
def filter_query_with_cruved(
    model,
    q,
    user,
    id_station_col="id_station",
    id_dataset_column="id_dataset",
    observers_column="observers_txt",
    id_digitiser_column="id_digitiser",
    filter_on_obs_txt=True,
    with_generic_table=False,
):
    """
    Filter the query with the cruved authorization of a user

    :param model: SQLA model or generic table whose columns are filtered
    :param q: the SQLA query to restrict
    :param user: permission object; value_filter "1" = own data only,
        "2" = own data + allowed datasets, anything else = no restriction
    :raises GeonatureApiError: when a configured column is missing on model
    :returns: a SQLA Query object
    """
    # with a generic table the columns live in model.columns, else on the model
    if with_generic_table:
        model_temp = model.columns
    else:
        model_temp = model
    # get the mandatory columns
    try:
        model_id_station_col = getattr(model_temp, id_station_col)
        model_id_dataset_column = getattr(model_temp, id_dataset_column)
        model_observers_column = getattr(model_temp, observers_column)
        model_id_digitiser_column = getattr(model_temp, id_digitiser_column)
    except AttributeError as e:
        raise GeonatureApiError(
            """the {model} table     does not have a column {e}
             If you change the {model} table, please edit your synthese config (cf EXPORT_***_COL)
            """.format(
                e=e, model=model
            )
        )
    if user.value_filter in ("1", "2"):
        # stations where the user is a registered observer
        sub_query_id_role = DB.session.query(CorStationObserverOccHab).filter(
            CorStationObserverOccHab.id_role == user.id_role).exists()
        ors_filters = [
            sub_query_id_role,
            model_id_digitiser_column == user.id_role,
        ]
        if filter_on_obs_txt:
            # also match the textual observers column on the user's full name
            user_fullname1 = user.nom_role + " " + user.prenom_role + "%"
            user_fullname2 = user.prenom_role + " " + user.nom_role + "%"
            ors_filters.append(model_observers_column.ilike(user_fullname1))
            ors_filters.append(model_observers_column.ilike(user_fullname2))
        if user.value_filter == "1":
            allowed_datasets = TDatasets.get_user_datasets(
                user, only_query=True, only_user=True).exists()
            ors_filters.append(allowed_datasets)
        elif user.value_filter == "2":
            allowed_datasets = TDatasets.get_user_datasets(
                user, only_query=True).exists()
            ors_filters.append(allowed_datasets)
        # bug fix: apply the OR filter exactly once; previously scope "1"
        # appended the identical WHERE clause twice
        q = q.filter(or_(*ors_filters))
    return q
Пример #6
0
    def filter_query_with_cruved(self, user):
        """
        Restrict self.query according to the user's CRUVED scope.

        Scope "1": own data only; scope "2": own data + accessible datasets.
        Any other scope leaves the query untouched.
        """
        if user.value_filter not in ("1", "2"):
            return
        # synthese ids where the user is a registered observer
        observer_synthese_ids = (
            select([CorObserverSynthese.id_synthese])
            .select_from(CorObserverSynthese)
            .where(CorObserverSynthese.id_role == user.id_role)
        )
        conditions = [
            self.model_id_syn_col.in_(observer_synthese_ids),
            self.model_id_digitiser_column == user.id_role,
        ]
        if current_app.config["SYNTHESE"]["CRUVED_SEARCH_WITH_OBSERVER_AS_TXT"]:
            # also match "<nom prenom>%" / "<prenom nom>%" in the observers text
            for pattern in (
                user.nom_role + " " + user.prenom_role + "%",
                user.prenom_role + " " + user.nom_role + "%",
            ):
                conditions.append(self.model_observers_column.ilike(pattern))

        if user.value_filter == "1":
            allowed_datasets = TDatasets.get_user_datasets(user, only_user=True)
        else:
            allowed_datasets = TDatasets.get_user_datasets(user)
        conditions.append(self.model_id_dataset_column.in_(allowed_datasets))
        self.query = self.query.where(or_(*conditions))
Пример #7
0
def create_dataset_user(user):
    """
    After account validation, create a personal acquisition framework (AF)
    and a personal dataset (JDD) for the new user.
    """
    af_desc_and_name = "Cadre d'acquisition personnel de {name} {surname}".format(
        name=user["nom_role"], surname=user["prenom_role"]
    )

    # the user acts both as data producer ("6") and as contact ("1") on the AF
    af_productor = CorAcquisitionFrameworkActor(
        id_role=user["id_role"],
        id_nomenclature_actor_role=func.ref_nomenclatures.get_id_nomenclature(
            "ROLE_ACTEUR", "6"
        ),
    )
    af_contact = CorAcquisitionFrameworkActor(
        id_role=user["id_role"],
        id_nomenclature_actor_role=func.ref_nomenclatures.get_id_nomenclature(
            "ROLE_ACTEUR", "1"
        ),
    )

    new_af = TAcquisitionFramework(
        acquisition_framework_name=af_desc_and_name,
        acquisition_framework_desc=(
            af_desc_and_name + " - auto-créé via la demande de création de compte"
        ),
        acquisition_framework_start_date=datetime.datetime.now(),
    )
    new_af.cor_af_actor = [af_productor, af_contact]

    DB.session.add(new_af)
    # commit now so new_af.id_acquisition_framework is populated for the JDD
    DB.session.commit()

    ds_desc_and_name = "Jeu de données personnel de {name} {surname}".format(
        name=user["nom_role"], surname=user["prenom_role"]
    )
    # same two actor roles on the dataset itself
    ds_productor = CorDatasetActor(
        id_role=user["id_role"],
        id_nomenclature_actor_role=func.ref_nomenclatures.get_id_nomenclature(
            "ROLE_ACTEUR", "6"
        ),
    )
    ds_contact = CorDatasetActor(
        id_role=user["id_role"],
        id_nomenclature_actor_role=func.ref_nomenclatures.get_id_nomenclature(
            "ROLE_ACTEUR", "1"
        ),
    )
    # new JDD: terrestrial and marine both True by default
    new_dataset = TDatasets(
        id_acquisition_framework=new_af.id_acquisition_framework,
        dataset_name=ds_desc_and_name,
        dataset_shortname=(
            ds_desc_and_name + " - auto-créé via la demande de création de compte"
        ),
        dataset_desc=ds_desc_and_name,
        marine_domain=True,
        terrestrial_domain=True,
    )
    new_dataset.cor_dataset_actor = [ds_productor, ds_contact]
    DB.session.add(new_dataset)
    DB.session.commit()
Пример #8
0
def post_jdd_from_user(id_user=None, id_organism=None):
    """
    Post a jdd from the mtd XML.

    Fetch the datasets (JDD) declared for *id_user* in the MTD web service,
    create their acquisition frameworks, then insert or update each dataset.

    :param id_user: id of the user whose JDD are fetched
    :param id_organism: optional organism id, also linked as a dataset actor
    :returns: the list of posted datasets as dicts, or a ({'message': ...}, 404)
        tuple when the MTD service returned nothing for this user
    :raises GeonatureApiError: when persisting a dataset fails
    """
    xml_jdd = None
    xml_jdd = get_jdd_by_user_id(id_user)

    if xml_jdd:
        dataset_list = parse_jdd_xml(xml_jdd)
        dataset_list_model = []
        for ds in dataset_list:
            # create (or fetch) the parent acquisition framework first
            new_af = post_acquisition_framework(
                uuid=ds['uuid_acquisition_framework'],
                id_user=id_user,
                id_organism=id_organism
            )
            ds['id_acquisition_framework'] = new_af['id_acquisition_framework']

            ds.pop('uuid_acquisition_framework')
            # get the id of the dataset to check if exists
            id_dataset = TDatasets.get_id(ds['unique_dataset_id'])
            ds['id_dataset'] = id_dataset

            dataset = TDatasets(**ds)

            # id_role in cor_dataset_actor
            # link the user as dataset actor (nomenclature ROLE_ACTEUR "1")
            actor = CorDatasetActor(
                id_role=id_user,
                id_nomenclature_actor_role=func.ref_nomenclatures.get_id_nomenclature('ROLE_ACTEUR', '1')
            )
            dataset.cor_dataset_actor.append(actor)
            # id_organism in cor_dataset_actor
            if id_organism:
                actor = CorDatasetActor(
                    id_organism=id_organism,
                    id_nomenclature_actor_role=func.ref_nomenclatures.get_id_nomenclature('ROLE_ACTEUR', '1')
                )
                dataset.cor_dataset_actor.append(actor)

            dataset_list_model.append(dataset)
            try:
                # merge updates an existing row, add inserts a new one
                if id_dataset:
                    DB.session.merge(dataset)
                else:
                    DB.session.add(dataset)
                DB.session.commit()
                DB.session.flush()
            # TODO catch db error ?
            except SQLAlchemyError as e:
                DB.session.rollback()
                error_msg = """
                Error posting JDD {} \n\n Trace: \n {}
                """.format(ds['unique_dataset_id'], e)
                log.error(error_msg)
                raise GeonatureApiError(error_msg)

        return [d.as_dict() for d in dataset_list_model]
    return {'message': 'Not found'}, 404
Пример #9
0
def post_dataset():
    """Create or update a dataset from the JSON request body."""
    payload = dict(request.get_json())
    actors = payload.pop('cor_dataset_actor')

    dataset = TDatasets(**payload)
    for actor in actors:
        dataset.cor_dataset_actor.append(CorDatasetActor(**actor))

    # update when the primary key is present, insert otherwise
    if dataset.id_dataset:
        DB.session.merge(dataset)
    else:
        DB.session.add(dataset)
    DB.session.commit()
    return dataset.as_dict(True)
Пример #10
0
def general_stats(info_role):
    """Return stats about synthese.

    .. :quickref: Synthese;

        - nb of observations
        - nb of distinct species
        - nb of distinct observer
        - nb ob datasets
    """
    allowed_datasets = TDatasets.get_user_datasets(info_role)
    counts = DB.session.query(
        func.count(Synthese.id_dataset),
        func.count(func.distinct(Synthese.cd_nom)),
        func.count(func.distinct(Synthese.observers)),
    )
    counts = synthese_query.filter_query_with_cruved(Synthese, counts, info_role)
    nb_data, nb_species, nb_observers = counts.one()
    return {
        "nb_data": nb_data,
        "nb_species": nb_species,
        "nb_observers": nb_observers,
        "nb_dataset": len(allowed_datasets),
    }
Пример #11
0
 def filter_query_generic_table(self, user):
     """
     Build a query over the generic table (a view) restricted by the
     user's CRUVED authorization.
     """
     q = DB.session.query(self.model.tableDef)
     if user.value_filter not in ("1", "2"):
         return q
     # join the releve/observer correlation so "my data" can be matched
     q = q.outerjoin(
         corRoleRelevesOccurrence,
         self.model.tableDef.columns.id_releve_occtax
         == corRoleRelevesOccurrence.columns.id_releve_occtax,
     )
     conditions = [
         corRoleRelevesOccurrence.columns.id_role == user.id_role,
         self.model.tableDef.columns.id_digitiser == user.id_role,
     ]
     if user.value_filter == "2":
         # scope "2": also accept any dataset the user can access
         allowed_datasets = TDatasets.get_user_datasets(user)
         conditions.insert(
             0,
             self.model.tableDef.columns.id_dataset.in_(tuple(allowed_datasets)),
         )
     return q.filter(or_(*conditions))
Пример #12
0
    def filter_query_with_cruved(self, user):
        """
        Restrict self.query according to the user's CRUVED scope.
        """
        allowed_datasets = TDatasets.get_user_datasets(user)
        if user.value_filter not in ("1", "2"):
            return
        # left-join observers so the user's own observations can be matched
        self.add_join(
            CorObserverSynthese,
            CorObserverSynthese.id_synthese,
            self.model.id_synthese,
            join_type="left",
        )
        conditions = [
            CorObserverSynthese.id_role == user.id_role,
            self.model.id_digitiser == user.id_role,
        ]
        if current_app.config["SYNTHESE"]["CRUVED_SEARCH_WITH_OBSERVER_AS_TXT"]:
            # match "<nom prenom>%" and "<prenom nom>%" in the observers text
            for pattern in (
                user.nom_role + " " + user.prenom_role + "%",
                user.prenom_role + " " + user.nom_role + "%",
            ):
                conditions.append(self.model.observers.ilike(pattern))
        if user.value_filter == "2":
            # scope "2" additionally allows any dataset the user can access
            conditions.append(self.model.id_dataset.in_(allowed_datasets))
        self.query = self.query.where(or_(*conditions))
Пример #13
0
def delete_dataset(info_role, ds_id):
    """
    Delete a dataset

    .. :quickref: Metadata;
    """
    # refuse deletion when Synthese observations reference this dataset
    if not is_dataset_deletable(ds_id):
        raise GeonatureApiError(
            "La suppression du jeu de données n'est pas possible car des données y sont rattachées dans la Synthèse",
            406,
        )
    user_actor = TDatasets.get_user_datasets(info_role)
    dataset = TDatasets.query.get(ds_id)
    if not dataset.user_is_allowed_to(user_actor, info_role, info_role.value_filter):
        raise Forbidden(
            f"User {info_role.id_role} cannot delete dataset {dataset.id_dataset}"
        )

    DB.session.query(TDatasets).filter(TDatasets.id_dataset == ds_id).delete()
    DB.session.commit()

    # empty body, 204 No Content
    return '', 204
Пример #14
0
 def filter_query_generic_table(self, user):
     """
     Return a prepared query filter with cruved authorization
     from a generic_table (a view)

     :param user: permission object; value_filter "1" = own data only,
         "2" = own data + allowed datasets, anything else = no restriction
     :returns: a SQLAlchemy query on self.model.tableDef
     """
     q = DB.session.query(self.model.tableDef)
     if user.value_filter in ("1", "2"):
         # join the releve observers so "my data" can be matched
         # NOTE(review): corRoleRelevesOccurrence is accessed directly here
         # (no .columns accessor) — confirm it exposes these attributes
         q = q.outerjoin(
             corRoleRelevesOccurrence,
             self.model.tableDef.columns.id_releve_occtax ==
             corRoleRelevesOccurrence.id_releve_occtax,
         )
         if user.value_filter == "2":
             # scope "2": own data or any dataset the user can access
             allowed_datasets = TDatasets.get_user_datasets(user)
             q = q.filter(
                 or_(
                     self.model.tableDef.columns.id_dataset.in_(
                         tuple(allowed_datasets)),
                     corRoleRelevesOccurrence.id_role == user.id_role,
                     self.model.tableDef.columns.id_digitiser ==
                     user.id_role,
                 ))
         elif user.value_filter == "1":
             # scope "1": only data the user observed or digitised
             q = q.filter(
                 or_(
                     corRoleRelevesOccurrence.id_role == user.id_role,
                     self.model.tableDef.columns.id_digitiser ==
                     user.id_role,
                 ))
     return q
Пример #15
0
def create_dataset():
    """
    Post one Dataset data
    .. :quickref: Metadata;
    """
    # new dataset owned (digitized) by the current user
    new_dataset = TDatasets(id_digitizer=g.current_user.id_role)
    handled = datasetHandler(dataset=new_dataset, data=request.get_json())
    return DatasetSchema().jsonify(handled)
Пример #16
0
def export(info_role):
    """
    Export the synthese observations matching the request filters.

    Supported export formats: 'csv', 'geojson', otherwise a zipped shapefile.

    :param info_role: user permission object used for the CRUVED filter
    :returns: a file response in the requested format
    """
    filters = dict(request.args)
    if 'limit' in filters:
        result_limit = filters.pop('limit')[0]
    else:
        result_limit = current_app.config['SYNTHESE']['NB_MAX_OBS_EXPORT']

    export_format = filters.pop('export_format')[0]
    allowed_datasets = TDatasets.get_user_datasets(info_role)

    q = DB.session.query(VSyntheseForExport)
    q = synthese_query.filter_query_all_filters(VSyntheseForExport, q, filters,
                                                info_role, allowed_datasets)

    q = q.order_by(VSyntheseForExport.date_min.desc())
    data = q.limit(result_limit)

    file_name = datetime.datetime.now().strftime('%Y_%m_%d_%Hh%Mm%S')
    file_name = filemanager.removeDisallowedFilenameChars(file_name)
    formated_data = [d.as_dict_ordered() for d in data]

    # bug fix: guard against an empty result set — formated_data[0]
    # previously raised IndexError when no observation matched the filters
    export_columns = formated_data[0].keys() if formated_data else []
    if export_format == 'csv':
        return to_csv_resp(
            file_name,
            formated_data,
            separator=';',
            columns=export_columns,
        )

    elif export_format == 'geojson':
        results = FeatureCollection(formated_data)
        return to_json_resp(results,
                            as_file=True,
                            filename=file_name,
                            indent=4)
    else:
        # shapefile export: clean previous exports, then build a new one
        filemanager.delete_recursively(str(ROOT_DIR /
                                           'backend/static/shapefiles'),
                                       excluded_files=['.gitkeep'])

        dir_path = str(ROOT_DIR / 'backend/static/shapefiles')
        FionaShapeService.create_shapes_struct(
            db_cols=VSyntheseForExport.db_cols,
            srid=current_app.config['LOCAL_SRID'],
            dir_path=dir_path,
            file_name=file_name,
            col_mapping=current_app.config['SYNTHESE']['EXPORT_COLUMNS'])
        for row in data:
            geom = row.the_geom_local
            row_as_dict = row.as_dict_ordered()
            FionaShapeService.create_feature(row_as_dict, geom)

        FionaShapeService.save_and_zip_shapefiles()

        return send_from_directory(dir_path,
                                   file_name + '.zip',
                                   as_attachment=True)
Пример #17
0
def create_dataset(info_role):
    """
    Post one Dataset data
    .. :quickref: Metadata;
    """
    # delegate creation of the new dataset to the shared handler
    blank_dataset = TDatasets()
    return DatasetSchema().jsonify(
        datasetHandler(request=request, dataset=blank_dataset, info_role=info_role)
    )
Пример #18
0
 def create_dataset(digitizer=None):
     """Create and persist a hard-coded test dataset inside a nested transaction."""
     id_digitizer = digitizer.id_role if digitizer else None
     with db.session.begin_nested():
         dataset = TDatasets(
             id_acquisition_framework=af.id_acquisition_framework,
             dataset_name='test',
             dataset_shortname='test',
             dataset_desc='test',
             marine_domain=True,
             terrestrial_domain=True,
             id_digitizer=id_digitizer,
         )
         db.session.add(dataset)
     return dataset
Пример #19
0
def get_status(info_role):
    """
    Route to get all the protection status of a synthese search
    """
    filters = dict(request.args)

    # species in the synthese joined to their protection articles
    q = (
        DB.session.query(
            distinct(VSyntheseForWebApp.cd_nom), Taxref, TaxrefProtectionArticles
        )
        .join(Taxref, Taxref.cd_nom == VSyntheseForWebApp.cd_nom)
        .join(
            TaxrefProtectionEspeces,
            TaxrefProtectionEspeces.cd_nom == VSyntheseForWebApp.cd_nom,
        )
        .join(
            TaxrefProtectionArticles,
            TaxrefProtectionArticles.cd_protection
            == TaxrefProtectionEspeces.cd_protection,
        )
    )

    allowed_datasets = TDatasets.get_user_datasets(info_role)
    q = synthese_query.filter_query_all_filters(
        VSyntheseForWebApp, q, filters, info_role, allowed_datasets
    )

    protection_status = []
    for _, taxref_row, article_row in q.all():
        taxon = taxref_row.as_dict()
        protection = article_row.as_dict()
        protection_status.append(OrderedDict([
            ('nom_complet', taxon['nom_complet']),
            ('nom_vern', taxon['nom_vern']),
            ('cd_nom', taxon['cd_nom']),
            ('cd_ref', taxon['cd_ref']),
            ('type_protection', protection['type_protection']),
            ('article', protection['article']),
            ('intitule', protection['intitule']),
            ('arrete', protection['arrete']),
            ('date_arrete', protection['date_arrete']),
            ('url', protection['url']),
        ]))

    export_columns = [
        'nom_complet', 'nom_vern', 'cd_nom', 'cd_ref', 'type_protection',
        'article', 'intitule', 'arrete', 'date_arrete', 'url'
    ]

    # timestamped CSV file name
    file_name = datetime.datetime.now().strftime('%Y_%m_%d_%Hh%Mm%S')
    return to_csv_resp(
        file_name,
        protection_status,
        separator=';',
        columns=export_columns,
    )
Пример #20
0
def get_dataset_details_dict(id_dataset, session_role):
    """
    Return a dataset from TDatasetDetails model (with all relationships).
    Also attaches the dataset's imports (fetched over HTTP) and the user's
    CRUVED permissions. Used for the get_one dataset view.

    :param id_dataset: id of the dataset to fetch
    :param session_role: current user (role) object
    :returns: the dataset as a dict, or None when not found / not allowed
    """
    q = DB.session.query(TDatasetDetails)
    q = cruved_filter(q, TDatasetDetails, session_role)
    try:
        data = q.filter(TDatasetDetails.id_dataset == id_dataset).one()
    except NoResultFound:
        return None

    dataset = data.as_dict(True)

    # fetch related imports from the import module's HTTP API
    imports = requests.get(
        current_app.config["API_ENDPOINT"] + "/import/by_dataset/" +
        str(id_dataset),
        headers={"Cookie":
                 request.headers.get("Cookie")},  # forward the auth token
    )
    if imports.status_code == 200:
        dataset["imports"] = imports.json()

    user_cruved = cruved_scope_for_user_in_module(
        id_role=session_role.id_role,
        module_code="METADATA",
    )[0]
    # compute this user's CRUVED for this particular dataset
    cruved = data.get_object_cruved(
        user_cruved=user_cruved,
        id_object=data.id_dataset,
        ids_object_user=TDatasets.get_user_datasets(session_role,
                                                    only_user=True),
        ids_object_organism=TDatasets.get_user_datasets(session_role,
                                                        only_user=False),
    )
    dataset["cruved"] = cruved
    return dataset
Пример #21
0
 def create_dataset(name, digitizer=None):
     """Create a test dataset named *name*, optionally owned by *digitizer*."""
     id_digitizer = digitizer.id_role if digitizer else None
     with db.session.begin_nested():
         dataset = TDatasets(
             id_acquisition_framework=af.id_acquisition_framework,
             dataset_name=name,
             dataset_shortname=name,
             dataset_desc=name,
             marine_domain=True,
             terrestrial_domain=True,
             id_digitizer=id_digitizer,
         )
         db.session.add(dataset)
         if digitizer and digitizer.organisme:
             # link the digitizer's organism as principal actor of the dataset
             actor = CorDatasetActor(
                 organism=digitizer.organisme,
                 nomenclature_actor_role=principal_actor_role,
             )
             dataset.cor_dataset_actor.append(actor)
     return dataset
Пример #22
0
 def filter_query_with_autorization(self, user):
     """Return a query on self.model restricted by the user's data scope."""
     q = DB.session.query(self.model)
     # conditions matching the user's own data (observer or digitiser)
     own_data = [
         self.model.observers.any(id_role=user.id_role),
         self.model.id_digitiser == user.id_role,
     ]
     if user.value_filter == "2":
         # scope "2": own data or any dataset the user can access
         allowed_datasets = TDatasets.get_user_datasets(user)
         own_data.insert(0, self.model.id_dataset.in_(tuple(allowed_datasets)))
         q = q.filter(or_(*own_data))
     elif user.value_filter == "1":
         q = q.filter(or_(*own_data))
     return q
Пример #23
0
 def filter_query_with_autorization(self, user):
     """Apply the user's data-scope restriction to a query on self.model."""
     query = DB.session.query(self.model)
     if user.value_filter not in ("1", "2"):
         return query
     conditions = []
     if user.value_filter == "2":
         # scope "2" additionally allows any dataset the user can access
         allowed_datasets = TDatasets.get_user_datasets(user)
         conditions.append(self.model.id_dataset.in_(tuple(allowed_datasets)))
     conditions.append(self.model.observers.any(id_role=user.id_role))
     conditions.append(self.model.id_digitiser == user.id_role)
     return query.filter(or_(*conditions))
Пример #24
0
def get_synthese(info_role):
    """
        return synthese row(s) filtered by form params
        Params must have same synthese fields names

        :param info_role: user permission object used for the CRUVED filter
        :returns: dict with a GeoJSON FeatureCollection under 'data', plus
            'nb_obs_limited' and 'nb_total' counters
    """
    # request.args values are lists; split comma-separated filter values
    filters = {
        key: value[0].split(',')
        for key, value in dict(request.args).items()
    }
    if 'limit' in filters:
        result_limit = filters.pop('limit')[0]
    else:
        result_limit = current_app.config['SYNTHESE']['NB_MAX_OBS_MAP']

    allowed_datasets = TDatasets.get_user_datasets(info_role)

    q = DB.session.query(VSyntheseForWebApp)

    q = synthese_query.filter_query_all_filters(VSyntheseForWebApp, q, filters,
                                                info_role, allowed_datasets)
    q = q.order_by(VSyntheseForWebApp.date_min.desc())
    # NOTE(review): nb_total is never updated below, so 'nb_total' is always 0
    # and 'nb_obs_limited' is only True when NB_MAX_OBS_MAP == 0 — looks like
    # a bug; confirm the intended behavior before relying on these fields
    nb_total = 0

    data = q.limit(result_limit)
    columns = current_app.config['SYNTHESE'][
        'COLUMNS_API_SYNTHESE_WEB_APP'] + MANDATORY_COLUMNS
    features = []
    for d in data:
        feature = d.get_geofeature(columns=columns)
        # prefer the vernacular name, fall back to the scientific name
        feature['properties'][
            'nom_vern_or_lb_nom'] = d.lb_nom if d.nom_vern is None else d.nom_vern
        features.append(feature)
    return {
        'data':
        FeatureCollection(features),
        'nb_obs_limited':
        nb_total == current_app.config['SYNTHESE']['NB_MAX_OBS_MAP'],
        'nb_total':
        nb_total
    }
Пример #25
0
def general_stats(info_role):
    """
    Return stats about synthese
        - nb of observations
        - nb of distinct species
        - nb of distinct observer
        - nb ob datasets
    """
    allowed_datasets = TDatasets.get_user_datasets(info_role)
    # one aggregated row: observation / species / observer counts
    stats_query = synthese_query.filter_query_with_cruved(
        Synthese,
        DB.session.query(
            func.count(Synthese.id_dataset),
            func.count(func.distinct(Synthese.cd_nom)),
            func.count(func.distinct(Synthese.observers)),
        ),
        info_role,
    )
    row = stats_query.one()
    return {
        "nb_data": row[0],
        "nb_species": row[1],
        "nb_observers": row[2],
        "nb_dataset": len(allowed_datasets),
    }
Пример #26
0
def delete_synthese(info_role, id_synthese):
    """
    Delete a synthese observation and the matching row in its source table.

    :param info_role: user permission object (CRUVED)
    :param id_synthese: id of the synthese observation to delete
    :returns: a (dict, status) tuple
    """
    synthese_obs = DB.session.query(Synthese).get(id_synthese)
    user_datasets = TDatasets.get_user_datasets(info_role)
    # resolves the observation only if the user is allowed to act on it
    synthese_releve = synthese_obs.get_observation_if_allowed(
        info_role, user_datasets)

    # get and delete source
    # TODO
    # can we really delete historical data from the synthese?
    source = DB.session.query(TSources).filter(
        TSources.id_source == synthese_obs.id_source).one()
    # entity_source_pk_field looks like "<schema>.<table>.<pk_column>"
    pk_field_source = source.entity_source_pk_field
    inter = pk_field_source.split('.')
    pk_field = inter.pop()
    # bug fix: join is a str method taking the list — `inter.join('.')`
    # raised AttributeError because lists have no join method
    table_source = '.'.join(inter)
    sql = text("DELETE FROM {table} WHERE {pk_field} = :id".format(
        table=table_source, pk_field=pk_field))
    result = DB.engine.execute(sql, id=synthese_obs.entity_source_pk_value)

    # delete synthese obs
    DB.session.delete(synthese_releve)
    DB.session.commit()

    return {'message': 'delete with success'}, 200
Пример #27
0
 def user_is_in_dataset_actor(self, user):
     """Return True if this dataset is among the user's allowed datasets."""
     # scope "1" restricts the lookup to datasets where the user is a direct actor
     restrict_to_user = user.value_filter == "1"
     allowed = TDatasets.get_user_datasets(user, only_user=restrict_to_user)
     return self.id_dataset in allowed
Пример #28
0
def post_jdd_from_user(id_user=None):
    """
    Post a jdd from the mtd XML.

    Fetch the datasets (JDD) declared for *id_user* in the MTD web service,
    create each parent acquisition framework once, resolve nomenclatures,
    then insert or update every dataset.

    :param id_user: id of the user whose JDD are fetched
    """
    xml_jdd = get_jdd_by_user_id(id_user)
    if not xml_jdd:
        return
    dataset_list = parse_jdd_xml(xml_jdd)
    posted_af_uuid = {}
    for ds in dataset_list:
        actors = ds.pop("actors")
        # prevent to not fetch, post or merge the same acquisition framework multiple times
        if ds["uuid_acquisition_framework"] not in posted_af_uuid:
            new_af = post_acquisition_framework(
                uuid=ds["uuid_acquisition_framework"], )
            # build a cached dict like {'<uuid>': 'id_acquisition_framework}
            posted_af_uuid[ds["uuid_acquisition_framework"]] = new_af[
                "id_acquisition_framework"]
        # get the id from the uuid
        ds["id_acquisition_framework"] = posted_af_uuid.get(
            ds["uuid_acquisition_framework"])

        ds.pop("uuid_acquisition_framework")
        # get the id of the dataset to check if exists
        id_dataset = TDatasets.get_id(ds["unique_dataset_id"])
        ds["id_dataset"] = id_dataset
        # map nomenclature mnemonics to their ids; drop unresolvable keys
        ds_copy = copy(ds)
        for key, value in ds_copy.items():
            if key.startswith("id_nomenclature"):
                response = DB.session.query(
                    func.ref_nomenclatures.get_id_nomenclature(
                        NOMENCLATURE_MAPPING.get(key),
                        value)).one_or_none()
                if response and response[0]:
                    ds[key] = response[0]
                else:
                    ds.pop(key)

        #  set validable = true
        ds["validable"] = True
        dataset = TDatasets(**ds)
        # if the dataset already exist
        if id_dataset:
            # existing dataset: reset its actor links before recreating them
            dataset.id_dataset = id_dataset

            delete_q = CorDatasetActor.__table__.delete().where(
                CorDatasetActor.id_dataset == id_dataset)
            DB.session.execute(delete_q)
            DB.session.commit()

            # create the correlation links
            create_cor_object_actors(actors, dataset)
            add_dataset_module(dataset)
            DB.session.merge(dataset)

        # its a new DS
        else:
            # set the dataset as activ
            dataset.active = True
            # create the correlation links
            create_cor_object_actors(actors, dataset)
            add_dataset_module(dataset)
            # Add the new DS
            DB.session.add(dataset)
        # try to commit
        try:
            DB.session.commit()
        except SQLAlchemyError as e:
            # bug fix: roll back the failed transaction so the session stays
            # usable for the remaining datasets in the loop
            DB.session.rollback()
            error_msg = "Error posting a dataset\nTrace:\n{} \n\n ".format(
                e)
            log.error(error_msg)
Пример #29
0
def get_af_and_ds_metadata(info_role):
    """
    Get all acquisition frameworks (AF) with their datasets.

    The CRUVED filter is only applied on the datasets, so that the user
    also sees every AF in which they have rights through a dataset.
    Used in the metadata map-list; the CRUVED permission is attached to
    each returned row (dataset and AF).

    .. :quickref: Metadata;

    :param info_role: current user and its permissions (added via kwargs)
    :type info_role: TRole
    :returns: `dict{'data': list<AF with datasets>, 'with_mtd_errors': <boolean>}`
        (`with_mtd_errors` is present only when the MTD synchronisation failed);
        returned with HTTP 404 when no dataset matches
    """
    with_mtd_error = False
    if current_app.config["CAS_PUBLIC"]["CAS_AUTHENTIFICATION"]:
        # synchronise the CA and JDD from the MTD WS
        # best-effort: a failure is logged and flagged, not raised
        try:
            mtd_utils.post_jdd_from_user(id_user=info_role.id_role,
                                         id_organism=info_role.id_organisme)
        except Exception as e:
            gunicorn_error_logger.info(e)
            log.error(e)
            with_mtd_error = True
    params = request.args.to_dict()
    params["orderby"] = "dataset_name"
    # datasets visible to the user under its CRUVED scope
    datasets = get_datasets_cruved(info_role, params, as_model=True)
    # ids of datasets/AF owned by the user itself vs through its organism,
    # used below to compute the per-row CRUVED
    ids_dataset_user = TDatasets.get_user_datasets(info_role, only_user=True)
    ids_dataset_organisms = TDatasets.get_user_datasets(info_role,
                                                        only_user=False)
    ids_afs_user = TAcquisitionFramework.get_user_af(info_role, only_user=True)
    ids_afs_org = TAcquisitionFramework.get_user_af(info_role, only_user=False)
    user_cruved = cruved_scope_for_user_in_module(
        id_role=info_role.id_role,
        module_code="METADATA",
    )[0]

    #  get all af from the JDD filtered with cruved or af where users has rights
    ids_afs_cruved = [
        d.id_acquisition_framework
        for d in get_af_cruved(info_role, as_model=True)
    ]
    list_id_af = [d.id_acquisition_framework
                  for d in datasets] + ids_afs_cruved
    afs = (DB.session.query(TAcquisitionFramework).filter(
        TAcquisitionFramework.id_acquisition_framework.in_(
            list_id_af)).order_by(
                TAcquisitionFramework.acquisition_framework_name).all())

    afs_dict = []
    #  get cruved for each AF and prepare an empty dataset list per AF
    for af in afs:
        af_dict = af.as_dict()
        af_dict["cruved"] = af.get_object_cruved(
            user_cruved=user_cruved,
            id_object=af.id_acquisition_framework,
            ids_object_user=ids_afs_user,
            ids_object_organism=ids_afs_org,
        )
        af_dict["datasets"] = []
        afs_dict.append(af_dict)

    #  get cruved for each ds and push them in their parent AF
    for d in datasets:
        dataset_dict = d.as_dict()
        dataset_dict["cruved"] = d.get_object_cruved(
            user_cruved=user_cruved,
            id_object=d.id_dataset,
            ids_object_user=ids_dataset_user,
            ids_object_organism=ids_dataset_organisms,
        )
        af_of_dataset = get_af_from_id(d.id_acquisition_framework, afs_dict)
        af_of_dataset["datasets"].append(dataset_dict)

    afs_resp = {"data": afs_dict}
    if with_mtd_error:
        afs_resp["with_mtd_errors"] = True
    if not datasets:
        return afs_resp, 404
    return afs_resp
Пример #30
0
def post_jdd_from_user(id_user=None, id_organism=None):
    """ Post a jdd from the mtd XML

    Fetch the JDD XML declared for ``id_user`` on the MTD web service,
    create or merge the related acquisition frameworks and datasets, and
    attach the user — and its organism if given — as 'contact principal'
    actor ('ROLE_ACTEUR' code '1') on each dataset.

    :param id_user: id of the role whose JDD must be imported
    :param id_organism: id of the user's organism (optional)
    :returns: the list of posted datasets as dicts, or
        ``({'message': 'Not found'}, 404)`` when the MTD WS returns
        nothing for this user
    :raises GeonatureApiError: when the final commit fails
    """
    xml_jdd = get_jdd_by_user_id(id_user)
    dataset_list_model = []

    if xml_jdd:
        dataset_list = parse_jdd_xml(xml_jdd)
        # cache {'<af uuid>': id_acquisition_framework} to prevent fetching,
        # posting or merging the same acquisition framework multiple times
        posted_af_uuid = {}
        for ds in dataset_list:
            if ds['uuid_acquisition_framework'] not in posted_af_uuid:
                new_af = post_acquisition_framework(
                    uuid=ds['uuid_acquisition_framework'],
                    id_user=id_user,
                    id_organism=id_organism)
                posted_af_uuid[ds['uuid_acquisition_framework']] = new_af[
                    'id_acquisition_framework']
            # get the AF id from the cached uuid
            ds['id_acquisition_framework'] = posted_af_uuid.get(
                ds['uuid_acquisition_framework'])
            ds.pop('uuid_acquisition_framework')

            # get the id of the dataset to check if it already exists
            id_dataset = TDatasets.get_id(ds['unique_dataset_id'])
            ds['id_dataset'] = id_dataset
            dataset = TDatasets(**ds)

            # 'ROLE_ACTEUR' code '1' = contact principal
            contact_role = func.ref_nomenclatures.get_id_nomenclature(
                'ROLE_ACTEUR', '1')

            # if the dataset already exists, only add the missing actors
            if id_dataset:
                actor_role = CorDatasetActor.get_actor(
                    id_dataset=id_dataset,
                    id_nomenclature_actor_role=contact_role,
                    id_role=id_user)
                if actor_role is None:
                    dataset.cor_dataset_actor.append(CorDatasetActor(
                        id_role=id_user,
                        id_nomenclature_actor_role=contact_role))

                if id_organism:
                    organism_role = CorDatasetActor.get_actor(
                        id_dataset=id_dataset,
                        id_nomenclature_actor_role=contact_role,
                        id_organism=id_organism)
                    if organism_role is None:
                        dataset.cor_dataset_actor.append(CorDatasetActor(
                            id_organism=id_organism,
                            id_nomenclature_actor_role=contact_role))

                # finally merge
                DB.session.merge(dataset)
            # if the dataset is not already in database
            else:
                dataset.cor_dataset_actor.append(CorDatasetActor(
                    id_role=id_user,
                    id_nomenclature_actor_role=contact_role))
                # id_organism in cor_dataset_actor
                if id_organism:
                    dataset.cor_dataset_actor.append(CorDatasetActor(
                        id_organism=id_organism,
                        id_nomenclature_actor_role=contact_role))

                DB.session.add(dataset)

            # fix: collect every dataset (previously only the last one was
            # appended, and an empty list raised NameError)
            dataset_list_model.append(dataset)

        try:
            DB.session.commit()
        except SQLAlchemyError as e:
            # fix: rollback (the original committed again + flushed, which
            # leaves the session broken) before surfacing the error
            DB.session.rollback()
            error_msg = """
            Error posting JDD {} \n\n Trace: \n {}
            """.format(ds['unique_dataset_id'], e)
            log.error(error_msg)
            raise GeonatureApiError(error_msg)

        return [d.as_dict() for d in dataset_list_model]
    return {'message': 'Not found'}, 404
Пример #31
0
def post_jdd_from_user(id_user=None, id_organism=None):
    """ Post a jdd from the mtd XML

    Fetch the JDD XML declared for ``id_user`` on the MTD web service,
    create or merge the corresponding acquisition frameworks and datasets,
    and add the user (plus its organism, if provided) as 'contact
    principal' actor ('ROLE_ACTEUR' code '1') on each dataset.

    :param id_user: id of the role whose JDD must be imported
    :param id_organism: id of the user's organism (optional)
    :returns: the list of posted datasets as dicts, or
        ``({'message': 'Not found'}, 404)`` when nothing is returned by
        the MTD WS for this user
    :raises GeonatureApiError: when the final commit fails
    """
    xml_jdd = get_jdd_by_user_id(id_user)
    dataset_list_model = []

    if xml_jdd:
        dataset_list = parse_jdd_xml(xml_jdd)
        # cache {'<af uuid>': id_acquisition_framework} so the same AF is
        # never fetched, posted or merged twice
        posted_af_uuid = {}
        for ds in dataset_list:
            if ds['uuid_acquisition_framework'] not in posted_af_uuid:
                new_af = post_acquisition_framework(
                    uuid=ds['uuid_acquisition_framework'],
                    id_user=id_user,
                    id_organism=id_organism
                )
                posted_af_uuid[ds['uuid_acquisition_framework']] = new_af['id_acquisition_framework']
            # get the AF id from the cached uuid
            ds['id_acquisition_framework'] = posted_af_uuid.get(ds['uuid_acquisition_framework'])
            ds.pop('uuid_acquisition_framework')

            # get the id of the dataset to check if it already exists
            id_dataset = TDatasets.get_id(ds['unique_dataset_id'])
            ds['id_dataset'] = id_dataset
            dataset = TDatasets(**ds)

            # 'ROLE_ACTEUR' code '1' = contact principal
            contact_role = func.ref_nomenclatures.get_id_nomenclature('ROLE_ACTEUR', '1')

            # if the dataset already exists, only add the missing actors
            if id_dataset:
                actor_role = CorDatasetActor.get_actor(
                    id_dataset=id_dataset,
                    id_nomenclature_actor_role=contact_role,
                    id_role=id_user
                )
                if actor_role is None:
                    dataset.cor_dataset_actor.append(
                        CorDatasetActor(
                            id_role=id_user,
                            id_nomenclature_actor_role=contact_role,
                        )
                    )

                if id_organism:
                    organism_role = CorDatasetActor.get_actor(
                        id_dataset=id_dataset,
                        id_nomenclature_actor_role=contact_role,
                        id_organism=id_organism
                    )
                    if organism_role is None:
                        dataset.cor_dataset_actor.append(
                            CorDatasetActor(
                                id_organism=id_organism,
                                id_nomenclature_actor_role=contact_role,
                            )
                        )

                # finally merge
                DB.session.merge(dataset)
            # if the dataset is not already in database
            else:
                dataset.cor_dataset_actor.append(
                    CorDatasetActor(
                        id_role=id_user,
                        id_nomenclature_actor_role=contact_role,
                    )
                )
                # id_organism in cor_dataset_actor
                if id_organism:
                    dataset.cor_dataset_actor.append(
                        CorDatasetActor(
                            id_organism=id_organism,
                            id_nomenclature_actor_role=contact_role,
                        )
                    )

                DB.session.add(dataset)

            # fix: collect every dataset (previously only the last one was
            # appended, and an empty list raised NameError)
            dataset_list_model.append(dataset)

        try:
            DB.session.commit()
        except SQLAlchemyError as e:
            # fix: rollback (the original committed again + flushed, which
            # leaves the session broken) before surfacing the error
            DB.session.rollback()
            error_msg = """
            Error posting JDD {} \n\n Trace: \n {}
            """.format(ds['unique_dataset_id'], e)
            log.error(error_msg)
            raise GeonatureApiError(error_msg)

        return [d.as_dict() for d in dataset_list_model]
    return {'message': 'Not found'}, 404
Пример #32
0
def get_af_and_ds_metadata(info_role):
    """
    Get all acquisition frameworks (AF) with their datasets.

    The CRUVED filter is only applied on the datasets, so that the user
    also sees every AF in which they have rights through a dataset.
    Used in the metadata map-list; the CRUVED permission is attached to
    each returned row (dataset and AF).

    .. :quickref: Metadata;

    :param info_role: current user and its permissions (added via kwargs)
    :type info_role: TRole
    :returns: `dict{'data': list<AF with datasets>, 'with_mtd_errors': <boolean>}`
        (`with_mtd_errors` is present only when the MTD synchronisation failed);
        returned with HTTP 404 when no dataset matches
    """
    with_mtd_error = False
    if current_app.config["CAS_PUBLIC"]["CAS_AUTHENTIFICATION"]:
        # synchronise the CA and JDD from the MTD WS
        # best-effort: a failure is logged and flagged, not raised
        try:
            mtd_utils.post_jdd_from_user(
                id_user=info_role.id_role, id_organism=info_role.id_organisme
            )
        except Exception as e:
            gunicorn_error_logger.info(e)
            log.error(e)
            with_mtd_error = True
    params = request.args.to_dict()
    params["orderby"] = "dataset_name"
    if "selector" not in params:
        params["selector"] = None
    datasets = filtered_ds_query(info_role, params).distinct().all()
    if len(datasets) == 0:
        return {"data": []}
    # ids of datasets/AF owned by the user itself vs through its organism,
    # used below to compute the per-row CRUVED
    ids_dataset_user = TDatasets.get_user_datasets(info_role, only_user=True)

    ids_dataset_organisms = TDatasets.get_user_datasets(info_role, only_user=False)
    ids_afs_user = TAcquisitionFramework.get_user_af(info_role, only_user=True)
    ids_afs_org = TAcquisitionFramework.get_user_af(info_role, only_user=False)
    user_cruved = cruved_scope_for_user_in_module(
        id_role=info_role.id_role, module_code="METADATA",
    )[0]


    #  get all af from the JDD filtered with cruved or af where users has rights
    #  (skipped when an explicit selector is given)
    ids_afs_cruved = (
        [d.id_acquisition_framework for d in get_af_cruved(info_role, as_model=True)]
        if not params["selector"]
        else []
    )
    list_id_af = [d.id_acquisition_framework for d in datasets] + ids_afs_cruved
    afs = (
        filtered_af_query(request.args)
        .filter(TAcquisitionFramework.id_acquisition_framework.in_(list_id_af))
        .order_by(TAcquisitionFramework.acquisition_framework_name)
        .all()
    )
    # keep only the AF ids that survived the AF-level filters
    list_id_af = [af.id_acquisition_framework for af in afs]

    afs_dict = []
    #  get cruved for each AF and prepare an empty dataset list per AF
    for af in afs:
        af_dict = af.as_dict(
            True,
            relationships=[
                "creator",
                "cor_af_actor",
                "nomenclature_actor_role",
                "organism",
                "role",
            ],
        )
        af_dict["cruved"] = af.get_object_cruved(
            user_cruved=user_cruved,
            id_object=af.id_acquisition_framework,
            ids_object_user=ids_afs_user,
            ids_object_organism=ids_afs_org,
        )
        af_dict["datasets"] = []
        af_dict["deletable"] = is_af_deletable(af.id_acquisition_framework)
        afs_dict.append(af_dict)

    #  get cruved for each ds and push them in their parent AF
    for d in datasets:
        dataset_dict = d.as_dict(
            recursif=True,
            relationships=[
                "creator",
                "cor_dataset_actor",
                "nomenclature_actor_role",
                "organism",
                "role",
            ],
        )
        # skip datasets whose AF was filtered out above
        if d.id_acquisition_framework not in list_id_af:
            continue
        dataset_dict["cruved"] = d.get_object_cruved(
            user_cruved=user_cruved,
            id_object=d.id_dataset,
            ids_object_user=ids_dataset_user,
            ids_object_organism=ids_dataset_organisms,
        )
        # dataset_dict["observation_count"] = (
        #     DB.session.query(Synthese.cd_nom).filter(Synthese.id_dataset == d.id_dataset).count()
        # )
        dataset_dict["deletable"] = is_dataset_deletable(d.id_dataset)
        af_of_dataset = get_af_from_id(d.id_acquisition_framework, afs_dict)
        af_of_dataset["datasets"].append(dataset_dict)

    afs_resp = {"data": afs_dict}
    if with_mtd_error:
        afs_resp["with_mtd_errors"] = True
    if not datasets:
        return afs_resp, 404
    return afs_resp
Пример #33
0
def create_dataset_user(user):
    """
    After dataset validation, create a personal acquisition framework (AF)
    and a personal dataset (JDD) so the user can start entering data.

    :param user: dict with at least "id_role", "nom_role" and "prenom_role"
    """
    role_id = user["id_role"]

    def _af_actor(cd_nomenclature):
        # AF actor link typed by the given 'ROLE_ACTEUR' nomenclature code
        return CorAcquisitionFrameworkActor(
            id_role=role_id,
            id_nomenclature_actor_role=func.ref_nomenclatures.get_id_nomenclature(
                "ROLE_ACTEUR", cd_nomenclature),
        )

    def _ds_actor(cd_nomenclature):
        # dataset actor link typed by the given 'ROLE_ACTEUR' nomenclature code
        return CorDatasetActor(
            id_role=role_id,
            id_nomenclature_actor_role=func.ref_nomenclatures.get_id_nomenclature(
                "ROLE_ACTEUR", cd_nomenclature),
        )

    af_desc_and_name = "Cadre d'acquisition personnel de {name} {surname}".format(
        name=user["nom_role"], surname=user["prenom_role"])

    new_af = TAcquisitionFramework(
        acquisition_framework_name=af_desc_and_name,
        acquisition_framework_desc=af_desc_and_name +
        " - auto-créé via la demande de création de compte",
        acquisition_framework_start_date=datetime.datetime.now(),
    )
    # code "6" = data productor, code "1" = main contact
    new_af.cor_af_actor = [_af_actor("6"), _af_actor("1")]
    db.session.add(new_af)

    ds_desc_and_name = "Jeu de données personnel de {name} {surname}".format(
        name=user["nom_role"], surname=user["prenom_role"])
    # add new JDD: terrestrial and marine = True as default
    # NOTE(review): dataset_shortname receives the longer suffixed label
    # while dataset_desc gets the short one — looks swapped; confirm intent.
    new_dataset = TDatasets(
        acquisition_framework=new_af,
        dataset_name=ds_desc_and_name,
        dataset_shortname=ds_desc_and_name +
        " - auto-créé via la demande de création de compte",
        dataset_desc=ds_desc_and_name,
        marine_domain=True,
        terrestrial_domain=True,
    )
    new_dataset.cor_dataset_actor = [_ds_actor("6"), _ds_actor("1")]
    db.session.add(new_dataset)

    # associate the new dataset with the configured modules
    for module_code in current_app.config['ACCOUNT_MANAGEMENT'][
            'DATASET_MODULES_ASSOCIATION']:
        module = TModules.query.filter_by(
            module_code=module_code).one_or_none()
        if module is None:
            warn("Module code '{}' does not exist, can not associate dataset.".
                 format(module_code))
            continue
        new_dataset.modules.append(module)

    db.session.commit()
Пример #34
0
 def user_is_in_dataset_actor(self, user):
     """Return True when *user* is an actor of this dataset."""
     # membership test against the ids of datasets the user acts on
     user_dataset_ids = TDatasets.get_user_datasets(user)
     return self.id_dataset in user_dataset_ids
Пример #35
0
def import_all_dataset_af_and_actors(table_name):
    """
    Import all datasets, acquisition frameworks and actors listed in
    ``table_name`` from the INPN MTD web service.

    For each row of the table, fetch the JDD XML by its UUID, create or
    merge the related acquisition framework and dataset, (re)create the
    actor links and the module association.

    :param table_name: name of the DB table holding the datasets to
        import (must expose ``unique_dataset_id``)
    """
    # log the created AF ids into a dedicated file (CRITICAL level only)
    file_handler = logging.FileHandler('/tmp/uuid_ca.txt')
    file_handler.setLevel(logging.CRITICAL)
    log.addHandler(file_handler)
    # NOTE(review): table_name is interpolated directly into the SQL —
    # only call this with a trusted, admin-provided table name
    datasets = DB.engine.execute(f"SELECT * FROM {table_name}")
    for d in datasets:
        xml_jdd = get_jdd_by_uuid(str(d.unique_dataset_id))
        if xml_jdd:
            ds_list = parse_jdd_xml(xml_jdd)
            if ds_list:
                ds = ds_list[0]
                # get user info from id_digitizer
                inpn_user = get_user_from_id_inpn_ws(ds['id_digitizer'])
                if inpn_user:
                    # insert user id digitizer
                    insert_user_and_org(inpn_user)
                    actors = ds.pop("actors")
                    # prevent to not fetch, post or merge the same acquisition framework multiple times
                    new_af = post_acquisition_framework(
                        uuid=ds["uuid_acquisition_framework"], )
                    # get the id from the uuid
                    ds["id_acquisition_framework"] = new_af[
                        'id_acquisition_framework']
                    log.critical(str(new_af['id_acquisition_framework']) + ",")
                    ds.pop("uuid_acquisition_framework")
                    # get the id of the dataset to check if it already exists
                    id_dataset = TDatasets.get_id(ds["unique_dataset_id"])
                    ds["id_dataset"] = id_dataset
                    # resolve nomenclature codes to ids; drop empty ones
                    ds_copy = copy(ds)
                    for key, value in ds_copy.items():
                        if key.startswith("id_nomenclature"):
                            if value is not None:
                                ds[key] = func.ref_nomenclatures.get_id_nomenclature(
                                    NOMENCLATURE_MAPPING.get(key), value)
                            else:
                                ds.pop(key)

                    #  set validable = true
                    ds["validable"] = True
                    dataset = TDatasets(**ds)
                    # if the dataset already exists: reset its actor links
                    if id_dataset:
                        dataset.id_dataset = id_dataset

                        delete_q = CorDatasetActor.__table__.delete().where(
                            CorDatasetActor.id_dataset == id_dataset)
                        DB.session.execute(delete_q)
                        DB.session.commit()

                        # create the correlation links
                        create_cor_object_actors(actors, dataset)
                        add_dataset_module(dataset)
                        DB.session.merge(dataset)

                    # its a new DS
                    else:
                        # set the dataset as activ
                        dataset.active = True
                        # create the correlation links
                        create_cor_object_actors(actors, dataset)
                        add_dataset_module(dataset)
                        # Add the new DS
                        DB.session.add(dataset)
                    # try to commit
                    try:
                        DB.session.commit()
                    except SQLAlchemyError as e:
                        # fix: rollback so later iterations keep a
                        # usable session, then report the error
                        DB.session.rollback()
                        error_msg = "Error posting a dataset\nTrace:\n{} \n\n ".format(
                            e)
                        print(error_msg)
                else:
                    print("NO USER FOUND")
            else:
                # fix: this message was a bare no-op string expression
                print("NO JDD IN XML ????")
        else:
            print("JDD NOT FOUND")
Пример #36
0
 def user_is_in_dataset_actor(self, user):
     """Tell whether *user* appears among this dataset's actors."""
     # the user's dataset ids come from the shared TDatasets helper
     return self.id_dataset in TDatasets.get_user_datasets(user)