Example 1
def calculate_absolute_features(id):
    """ Calculate the absolute features for the dataset id.

    Keyword arguments:
    id -- id of the dataset

    """
    # TODO: improve the performance of these computations
    # t0 = datetime.datetime.utcnow()
    # create new db session
    session = create_session()
    # get the dataset row from the db
    dataset = session.query(Dataset).filter_by(id=id).first()
    # update the status and progress bar
    dataset.status = 'Calculating absolute features'
    dataset.progress = 5
    # commit status and progress bar changes
    session.commit()

    # query the movement data (time, animal id, x, y) for the dataset
    query = session.query(Movement_data.time, Movement_data.animal_id,
                          functions.ST_X(Movement_data.position), functions.ST_Y(Movement_data.position)) \
        .filter_by(dataset_id=id).order_by('time')
    # read into pandas frame for faster calculation
    df = pd.read_sql(query.statement, query.session.bind)
    # rename the coordinate columns
    df.rename(columns={'ST_X_1': 'x', 'ST_Y_1': 'y'}, inplace=True)

    # Extracting the features via mkit
    df = mkit.extract_features(df, dataset.fps).sort_values(
        ['animal_id', 'time'])

    df = df.drop(columns=['stopped'])
    df = df.rename(
        columns={
            'distance': 'metric_distance',
            'average_speed': 'speed',
            'average_acceleration': 'acceleration'
        })
    try:
        for index, row in df.iterrows():
            record = Movement_data(dataset_id=id, **OrderedDict(row))
            session.merge(record)

        # change the progress bar
        dataset.progress = 50
        # add the data to the database
        session.commit()

    except Exception as e:
        # Something went wrong when calculating absolute features
        print(e)
        session.rollback()
        dataset.status = 'Error - calculating absolute features ' + str(e)[0:200]
        dataset.error = True
        session.commit()
    finally:
        # always release the scoped session
        session.remove()
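
Note: Examples 1, 2, and 10 share one pattern — select ST_X/ST_Y alongside the other columns and load the result with pd.read_sql. Below is a minimal, self-contained sketch of that pattern; the Point model and the engine URL are hypothetical stand-ins, and labelling the coordinate columns avoids the ST_X_1/ST_Y_1 rename step seen above.

import pandas as pd
from sqlalchemy import create_engine, Column, Integer
from sqlalchemy.orm import declarative_base, Session
from geoalchemy2 import Geometry, functions

Base = declarative_base()

class Point(Base):  # hypothetical model, for illustration only
    __tablename__ = 'points'
    id = Column(Integer, primary_key=True)
    position = Column(Geometry('POINT'))

engine = create_engine('postgresql:///example')  # assumed PostGIS database
with Session(engine) as session:
    query = session.query(
        Point.id,
        functions.ST_X(Point.position).label('x'),  # labelled: no rename needed
        functions.ST_Y(Point.position).label('y'))
    df = pd.read_sql(query.statement, session.bind)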
Example 2
def calculate_network(dataset_id, network_id):
    """ Calculate the dynamic network for the dataset and the chosen weighted features

    Keyword arguments:
    dataset_id -- id of the dataset
    network_id -- in combination with the dataset_id the primary key
    """
    # start = time.time()
    # create new db session for the new spanned process
    session = create_session()
    try:
        network_model = session.query(Network).filter_by(dataset_id=dataset_id, network_id=network_id).first()

        # numpy array of the input weights; the euclidean_distance weight
        # appears twice because it is applied to both the x and y columns
        weights = np.array([network_model.metric_distance, network_model.speed, network_model.acceleration,
                            network_model.distance_centroid, network_model.direction, network_model.euclidean_distance,
                            network_model.euclidean_distance])

        # new way to compute the distance matrix
        # get the movement data
        query = session.query(Movement_data.time, Movement_data.animal_id, Movement_data.metric_distance,
                              Movement_data.speed, Movement_data.acceleration, Movement_data.distance_centroid,
                              Movement_data.direction,
                              functions.ST_X(Movement_data.position), functions.ST_Y(Movement_data.position)) \
            .filter_by(dataset_id=dataset_id).order_by('time', 'animal_id')

        # read into pandas frame for faster calculation
        df = pd.read_sql(query.statement, query.session.bind)
        df.rename(columns={'ST_X_1': 'x', 'ST_Y_1': 'y'}, inplace=True)
        # normalize the feature columns to [0, 1]
        scaler = MinMaxScaler()
        feature_cols = ['metric_distance', 'speed', 'acceleration',
                        'distance_centroid', 'direction', 'x', 'y']
        df[feature_cols] = scaler.fit_transform(df[feature_cols])

        # group by time for the network computation for each time frame
        grouped_df = df.groupby(['time'])
        # compute network and hierarchy
        two_result = applyParallel(grouped_df, weights)

        result_network = two_result[0]
        result_hclust = two_result[1]

        # save the results in the database
        network_model.network = json.dumps(result_network)
        network_model.hierarchy = json.dumps(result_hclust, separators=(',', ':'))
        network_model.finished = True
        session.commit()
    except Exception as e:
        # Something went wrong while calculating the network
        print('Error - ' + str(e)[0:200])
        session.rollback()
        network_model.status = 'Error - ' + str(e)[0:200]
        network_model.error = True
        session.commit()
    finally:
        # always release the scoped session
        session.remove()
Example 3
 def _serialize(self, value, attr, obj):
     if value is None:
         return value
     else:
         if attr == 'location':
             return {'lng': db.session.scalar(geo_funcs.ST_X(value)),
                     'lat': db.session.scalar(geo_funcs.ST_Y(value))}
         else:
             return None
Example 4
def query_vendor_location(vendor_id):
    return db.session.query(
            Locations.vendor_id,
            Locations.tstamp,
            func.ST_X(Locations.geom).label("lon"),
            func.ST_Y(Locations.geom).label("lat"))\
        .filter_by(vendor_id=vendor_id)\
        .order_by(Locations.tstamp.desc())\
        .first()
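
A hedged usage sketch for Example 4 — the vendor id is an assumption, and the labels from the query become attributes on the returned row:

row = query_vendor_location(vendor_id=42)  # hypothetical id
if row is not None:
    print(row.vendor_id, row.tstamp, row.lon, row.lat)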
Example 5
 def _serialize(self, value, attr, obj):
     if value is None:
         return value
     else:
         if attr == 'geom':
             return [
                 db.session.scalar(geo_funcs.ST_Y(value)),
                 db.session.scalar(geo_funcs.ST_X(value))
             ]
         else:
             return None
Example 6
 def lat(self):
     # note: PostGIS convention is X = longitude; this property assumes the
     # coordinates were stored as (lat, lon)
     return functions.ST_X(cast(self.position, Geography))
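
If that (lat, lon) storage order is indeed intended, the matching longitude property would mirror it with ST_Y — a sketch, not code from the original class:

 def lng(self):
     # assumption: counterpart of lat() above, reading the second coordinate
     return functions.ST_Y(cast(self.position, Geography))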
Example 7
 def get_heatmap_geom(self):
     day_ago = datetime.now() - timedelta(hours=24)
     q = self.session.query(geo_func.ST_Y(Tweet.geom), geo_func.ST_X(Tweet.geom)).filter(Tweet.created_at > day_ago).all()
     return q
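
Example 7 already returns (lat, lon) pairs, which is the point order heat-map libraries such as folium expect. A sketch of rendering the result — folium is an assumption, since the original rendering code is not shown:

import folium
from folium.plugins import HeatMap

# hypothetical instance of the class above; convert Row objects to plain tuples
points = [tuple(p) for p in store.get_heatmap_geom()]
m = folium.Map(location=points[0] if points else (0, 0), zoom_start=12)
HeatMap(points).add_to(m)
m.save('heatmap.html')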
Example 8
def get_feature_info(id, srid, translations):
    """The function gets the geometry of a parcel by it's ID and does an overlay
    with other administrative layers to get the basic parcelInfo and attribute
    information of the parcel : municipality, local names, and so on

    hint:
    for debbuging the query use str(query) in the console/browser window
    to visualize geom.wkt use session.scalar(geom.wkt)
    """
    # fall back to the default SRID when none is given (the original
    # try/except never triggered, since assigning a parameter cannot fail)
    SRS = srid if srid is not None else 2056

    parcelInfo = {}
    parcelInfo['featureid'] = None
    Y = None
    X = None

    if id:
        parcelInfo['featureid'] = id
    # elif request.params.get('X') and request.params.get('Y') :
    # X = int(request.params.get('X'))
    # Y = int(request.params.get('Y'))
    else:
        raise Exception(translations[''])

    if parcelInfo['featureid'] is not None:
        queryresult = DBSession.query(Property).filter_by(
            id=parcelInfo['featureid']).first()
        # We should check uniqueness of the property id and raise an exception if there are multiple results
    elif X is not None and Y is not None and X > 0 and Y > 0:
        if Y > X:
            pointYX = WKTElement('POINT(' + str(Y) + ' ' + str(X) + ')', SRS)
        else:
            pointYX = WKTElement('POINT(' + str(X) + ' ' + str(Y) + ')', SRS)
        queryresult = DBSession.query(Property).filter(
            Property.geom.ST_Contains(pointYX)).first()
        parcelInfo['featureid'] = queryresult.id
    else:
        # to be defined
        return HTTPBadRequest(translations['HTTPBadRequestMsg'])

    parcelInfo['geom'] = queryresult.geom
    parcelInfo['area'] = int(
        round(DBSession.scalar(queryresult.geom.ST_Area()), 0))

    # legacy Python 2 guard (types.ClassType does not exist on Python 3)
    if not isinstance(LocalName, types.ClassType):
        queryresult1 = DBSession.query(LocalName).filter(
            LocalName.geom.ST_Intersects(parcelInfo['geom'])).first()
        parcelInfo['lieu_dit'] = queryresult1.nomloc  # Flurname

    queryresult2 = DBSession.query(Town).filter(
        Town.geom.ST_Buffer(1).ST_Contains(parcelInfo['geom'])).first()

    parcelInfo['nummai'] = queryresult.nummai  # Parcel number
    parcelInfo['type'] = queryresult.typimm  # Parcel type
    if 'no_egrid' in queryresult.__table__.columns.keys():
        parcelInfo['no_egrid'] = queryresult.no_egrid
    else:
        parcelInfo['no_egrid'] = translations['noEGRIDtext']

    if parcelInfo['type'] is None:
        parcelInfo['type'] = translations['UndefinedPropertyType']

    if 'numcad' in queryresult2.__table__.columns.keys():
        parcelInfo['nomcad'] = queryresult2.cadnom

    parcelInfo['numcom'] = queryresult.numcom
    parcelInfo['nomcom'] = queryresult2.comnom
    parcelInfo['nufeco'] = queryresult2.nufeco
    parcelInfo['centerX'] = DBSession.scalar(
        functions.ST_X(queryresult.geom.ST_Centroid()))
    parcelInfo['centerY'] = DBSession.scalar(
        functions.ST_Y(queryresult.geom.ST_Centroid()))
    parcelInfo['BBOX'] = get_bbox_from_geometry(
        DBSession.scalar(functions.ST_AsText(queryresult.geom.ST_Envelope())))

    # the get_print_format function is no longer needed, as the paper size has been fixed to A4 by the cantons,
    # but we keep the code in case the decision is revoked
    # parcelInfo['printFormat'] = get_print_format(parcelInfo['BBOX'])

    return parcelInfo
Example 9
 def points_to_linestring(self):
     point1 = (DB.session.scalar(geo_funcs.ST_X(self.geom_start)),
               DB.session.scalar(geo_funcs.ST_Y(self.geom_start)))
     point2 = (DB.session.scalar(geo_funcs.ST_X(self.geom_end)),
               DB.session.scalar(geo_funcs.ST_Y(self.geom_end)))
     return LineString([point1, point2])
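
Example 9 converts two stored points into a shapely LineString; the reverse direction — persisting a shapely geometry back into a geoalchemy2 column — goes through from_shape. A sketch with hypothetical names:

from geoalchemy2.shape import from_shape

line = segment.points_to_linestring()  # hypothetical instance of the class above
segment.geom = from_shape(line, srid=4326)  # column name and SRID are assumptions
DB.session.commit()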
Example 10
def calculate_mkit_feats(id, session):
    """ Calculate all remaining features, including: mean speed/acceleration, centroid-direction, mean-dist centroid, polarization,

    Keyword arguments:
    id - id of the dataset
    session - db session
    """

    # Get features from database
    query = session.query(Movement_data.time,
                          Movement_data.animal_id,
                          Movement_data.direction,
                          functions.ST_X(Movement_data.position),
                          functions.ST_Y(Movement_data.position)) \
        .filter_by(dataset_id=id).order_by('time')
    # read into pandas frame for faster calculation
    df = pd.read_sql(query.statement, query.session.bind)

    # prepare for analysis
    df.rename(columns={'ST_X_1': 'x', 'ST_Y_1': 'y'}, inplace=True)

    # calculate the centroids and medoids, store in group data
    movement = mkit.centroid_medoid_computation(df, object_output=True)
    # prepare for merge
    movement = movement.rename(
        columns={'distance_to_centroid': 'distance_centroid'})

    # merge movement data with distance_centroid
    for index, row in movement.iterrows():
        record = Movement_data(dataset_id=id, **OrderedDict(row))
        session.merge(record)

    session.commit()
    # Take subset from dataset above, focusing only on group-level
    group = movement.loc[
        movement.animal_id == list(set(movement.animal_id))[0],
        ['time', 'x_centroid', 'y_centroid', 'medoid', 'centroid']
    ].reset_index(drop=True)

    # compute polarization
    pol = mkit.compute_polarization(df, group_output=True).fillna(0)

    # compute mean speed, acceleration and mean distance to centroid
    mov = mkit.group_movement(movement).fillna(0)

    # compute centroid direction
    cen_dir = mkit.compute_centroid_direction(movement,
                                              group_output=True).fillna(0)

    # merge computed values into group-dataframe
    data_frames = [group, pol, mov, cen_dir]
    group = reduce(
        lambda left, right: pd.merge(left, right, on=['time'], how='left'),
        data_frames)
    # prepare for merging with database
    group = group.rename(
        columns={
            'mean_distance_centroid': 'distance_centroid',
            'centroid_direction': 'direction',
            'mean_speed': 'speed',
            'mean_acceleration': 'acceleration',
            'total_dist': 'metric_distance',
            'polarization': 'polarisation'
        })

    # merge the group-level rows (one Group_data row per time step) into the database
    for index, row in group.iterrows():
        record = Group_data(dataset_id=id, **OrderedDict(row))
        session.merge(record)

    # add the data to the database
    session.commit()
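
Row-by-row session.merge, as used in both loops above, issues one SELECT per row to decide between INSERT and UPDATE. When the rows are known to be new, a plain bulk insert is a common faster alternative — a sketch under that assumption:

# assumption: the group rows do not exist yet, so merge semantics are not needed
objects = [Group_data(dataset_id=id, **dict(row)) for _, row in group.iterrows()]
session.bulk_save_objects(objects)
session.commit()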