Example #1
File: pipeline.py Project: aschle/bees-sna
def generate_network(index, file_list, confidence, distance, ilen):

    xmax = 3000

    # one df per cam
    dataframes = np.empty(4, dtype=object)

    for i in list(range(4)):
        fc = load_frame_container(file_list[i])
        df = prep.get_dataframe2(fc)
        df = prep.calcIds(df, confidence)

        camIdx = int(file_list[i].split("/")[-1].split("_")[1])
        dataframes[camIdx] = df

    # shift cam 0 and cam 1 to the right
    dataframes[0].xpos = dataframes[0].xpos + xmax
    dataframes[1].xpos = dataframes[1].xpos + xmax

    # merge the two sides
    side0 = pd.concat([dataframes[3], dataframes[0]])
    side1 = pd.concat([dataframes[2], dataframes[1]])

    close1 = prep.get_close_bees_ckd(side0, distance)
    close2 = prep.get_close_bees_ckd(side1, distance)

    close = pd.concat([close1, close2])

    p = prep.bee_pairs_to_timeseries(close)

    return prep.extract_interactions(p, ilen)
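
The function handles one time interval per call and shares no state between calls, so intervals can be dispatched to worker processes independently. A minimal driver sketch, assuming a hypothetical file_groups list with one group of four camera files per interval; the name and parameter values below are placeholders, not part of the repository:

from multiprocessing import Pool

def run_all(file_groups, confidence=0.95, distance=160, ilen=3):
    # one task per interval: (index, four camera files, parameters)
    tasks = [(idx, group, confidence, distance, ilen)
             for idx, group in enumerate(file_groups)]
    with Pool() as pool:
        # each task returns the interaction edges for its interval
        return pool.starmap(generate_network, tasks)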
Example #2
File: idstat.py Project: aschle/bees-sna
def working(enu, path, b, e, confidence, year):

    df0 = prep.getDF(path, b, e, 0)
    df1 = prep.getDF(path, b, e, 1)
    df2 = prep.getDF(path, b, e, 2)
    df3 = prep.getDF(path, b, e, 3)

    data = pd.concat([df0, df1, df2, df3])

    id_list = [0] * 4096

    if data.shape[0] == 0:
        print('Empty DF: {} - {}'.format(
            datetime.datetime.fromtimestamp(b, tz=pytz.UTC),
            datetime.datetime.fromtimestamp(e, tz=pytz.UTC)))

    else:
        data = prep.calcIds(data, confidence, year)

        df = DataFrame(data.groupby(by="id").size(),
                       columns=["fre"]).reset_index()

        print("{}-{}-{}-{}".format(
            enu, df.shape, datetime.datetime.fromtimestamp(b, tz=pytz.UTC),
            datetime.datetime.fromtimestamp(e, tz=pytz.UTC)))
        for r in df.iterrows():
            id_list[r[1].id] = r[1].fre

    return np.array(id_list)
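
Because working always returns an array of length 4096 (one detection count per possible tag ID), results from consecutive intervals stack directly into a matrix. A small sketch under that assumption; the path and timestamps are placeholders:

import numpy as np

path = "path/to/repo"                       # placeholder
begin_ts, end_ts = 1469995200, 1470081600   # placeholder UTC timestamps
intervals = [(b, b + 3600) for b in range(begin_ts, end_ts, 3600)]

rows = [working(i, path, b, e, confidence=0.95, year=2016)
        for i, (b, e) in enumerate(intervals)]
counts = np.vstack(rows)   # shape: (number of intervals, 4096)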
Example #3
def generate_networks(index,
                      file_list,
                      confidence=.95,
                      distance=160,
                      ilen=3,
                      window_size=256):
    print("process {} - start".format(index))

    xmax = 3000

    # list of networks
    network_list = []

    # one df per cam
    dataframes = np.empty(4, dtype=object)

    for i in list(range(4)):
        fc = load_frame_container(file_list[i])
        df = prep.get_dataframe2(fc)
        df = prep.calcIds(df, confidence)

        camIdx = int(file_list[i].split("/")[-1].split("_")[1])
        dataframes[camIdx] = df

    # shift cam 0 and cam 1 to the right
    dataframes[0].xpos = dataframes[0].xpos + xmax
    dataframes[1].xpos = dataframes[1].xpos + xmax

    # merge the two sides
    side0 = pd.concat([dataframes[3], dataframes[0]])
    side1 = pd.concat([dataframes[2], dataframes[1]])

    close1 = prep.get_close_bees_ckd(side0, distance)
    close2 = prep.get_close_bees_ckd(side1, distance)

    close = pd.concat([close1, close2])

    p = prep.bee_pairs_to_timeseries(close)

    for w in list(range(int(1024 / window_size))):
        part = p.iloc[:, window_size * w:window_size * (w + 1)]  # .ix was removed in modern pandas
        edges = prep.extract_interactions(part, ilen)
        g = prep.create_graph2(edges)
        network_list.append(((index * 1024) + (w * window_size), g))

    print("process {} - end - {}".format(index, len(network_list)))
    return network_list
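
Each process returns a list of (frame_offset, graph) tuples, so a natural follow-up is to flatten the per-process lists and order the networks globally. A short sketch, assuming a hypothetical results list that holds the return value of every process:

results = []   # placeholder: one generate_networks(...) return value per worker

all_networks = []
for network_list in results:
    all_networks.extend(network_list)

# order the networks by their global frame offset (first tuple element)
all_networks.sort(key=lambda item: item[0])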
Example #4
def generate_network(index, file, confidence, ilens, distances):

    fc = load_frame_container(file)
    df = prep.get_dataframe2(fc)
    df = prep.calcIds(df, confidence)

    result = []

    for distance in distances:
        close = prep.get_close_bees_ckd(df, distance)
        p = prep.bee_pairs_to_timeseries(close)

        for ilen in ilens:
            r = prep.extract_interactions(p, ilen)
            result.append((ilen, distance, r))

    return result
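
This variant sweeps a whole grid of ilen and distance values over a single file, so the returned tuples are easy to index by parameter pair. A sketch with a placeholder file path and placeholder parameter lists:

some_file = "path/to/frame_container"   # placeholder
edges_by_params = {}
for ilen, distance, edges in generate_network(0, some_file, 0.95,
                                              ilens=[2, 3, 6],
                                              distances=[120, 160, 200]):
    edges_by_params[(ilen, distance)] = edges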
Example #5
def generate_network(enu, path, b, e, confidence, distance, ilen, year, gap):

    xmax = 3000
    offset = 2 * distance

    parts = np.empty(4, dtype=object)

    abbrechen = False

    stat = []

    # one df per camera
    for i in list(range(4)):

        df = prep.getDF(path, b, e, i)

        numframes = 0
        if (df.shape[0] != 0):
            numframes = df.groupby(by='frame_idx').size().shape[0]

        stat.append(numframes)

        df = prep.calcIds(df, confidence, year)

        # abort if one of the DFs is empty
        if (df.shape[0] == 0):
            abbrechen = True

        parts[i] = df

    if abbrechen:
        print("#{}: From {} to {} - {}".format(
            enu, datetime.datetime.fromtimestamp(b, tz=pytz.UTC),
            datetime.datetime.fromtimestamp(e, tz=pytz.UTC), stat))
        return Series()

    if year == 2015:
        # shift cam 0 and cam 1 to the right
        parts[0].xpos = parts[0].xpos + xmax + offset
        parts[1].xpos = parts[1].xpos + xmax + offset

        # merge the two sides
        side0 = pd.concat([parts[3], parts[0]])
        side1 = pd.concat([parts[2], parts[1]])

    if year == 2016:
        # shift cam 1 and cam 3 to the right
        parts[1].xpos = parts[1].xpos + xmax + offset
        parts[3].xpos = parts[3].xpos + xmax + offset

        # synchronize the cameras on each side
        parts[0], parts[1] = prep.mapping(parts[0], parts[1])
        parts[2], parts[3] = prep.mapping(parts[2], parts[3])

        d0 = len(parts[0].frame_idx.unique())
        d1 = len(parts[1].frame_idx.unique())
        d2 = len(parts[2].frame_idx.unique())
        d3 = len(parts[3].frame_idx.unique())

        print("#{}: From {} to {} - {} - {} {} {} {}".format(
            enu, datetime.datetime.fromtimestamp(b, tz=pytz.UTC),
            datetime.datetime.fromtimestamp(e, tz=pytz.UTC), stat, d0, d1, d2,
            d3))

        # merge the two sides
        side0 = pd.concat([parts[0], parts[1]])
        side1 = pd.concat([parts[2], parts[3]])

    dt = datetime.datetime.fromtimestamp(b, tz=pytz.UTC)
    # drop detections whose ID was detected only very rarely overall
    side0 = prep.removeDetectionsList(side0, dt.strftime("%Y-%m-%d"))
    side1 = prep.removeDetectionsList(side1, dt.strftime("%Y-%m-%d"))

    close1 = prep.get_close_bees_ckd(side0, distance)
    close2 = prep.get_close_bees_ckd(side1, distance)

    close = pd.concat([close1, close2])

    # build a time series for each bee pair
    p = prep.bee_pairs_to_timeseries(close)

    # correct the pair time series by filling short gaps
    p_corrected = p.apply(prep.fill_gaps, axis=1, args=[gap])

    return prep.extract_interactions(p_corrected, ilen)
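
The pipeline above runs per time window and returns a (possibly empty) Series of interactions, so a driver can walk consecutive windows and concatenate whatever comes back. A sketch with placeholder path, start timestamp and parameters:

import pandas as pd

path = "path/to/repo"     # placeholder
start_ts = 1470002400     # placeholder UTC timestamp
results = []
for enu in range(24):     # 24 one-hour windows
    b = start_ts + enu * 3600
    e = b + 3600
    s = generate_network(enu, path, b, e, confidence=0.95, distance=160,
                         ilen=3, year=2016, gap=2)
    results.append(s)

# skipped windows contribute an empty Series and add no rows
interactions = pd.concat(results)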
Example #6
    npa = np.array(file).reshape(int(l), 4)
    return npa


files = get_files(path)

interactions = Series()

for file_list in files:

    dataframes = np.empty(4, dtype=object)

    for i in list(range(4)):
        fc = load_frame_container(file_list[i])
        df = prep.get_dataframe(fc)
        df = prep.calcIds(df, CONFIDENCE)

        camIdx = int(file_list[i].split("/")[-1].split("_")[1])
        dataframes[camIdx] = df

    # shift cam 0 and cam 1 to the right
    dataframes[0].xpos = dataframes[0].xpos + xmax
    dataframes[1].xpos = dataframes[1].xpos + xmax

    # merge the two sides
    side0 = pd.concat([dataframes[3], dataframes[0]])
    side1 = pd.concat([dataframes[2], dataframes[1]])

    close1 = prep.get_close_bees(side0, DISTANCE)
    close2 = prep.get_close_bees(side1, DISTANCE)
Example #7
File: script.py Project: aschle/bees-sna
CONFIDENCE = c
DISTANCE = d

xmax = 3000
ymax = 4000
LENGTH = l

path = f+p

fc0 = prep.get_fc(path, 0)
fc1 = prep.get_fc(path, 1)
fc2 = prep.get_fc(path, 2)
fc3 = prep.get_fc(path, 3)

df3 = prep.get_dataframe(fc3)
df3 = prep.calcIds(df3, CONFIDENCE)
df0 = prep.get_dataframe(fc0)
df0 = prep.calcIds(df0, CONFIDENCE)

df2 = prep.get_dataframe(fc2)
df2 = prep.calcIds(df2, CONFIDENCE)
df1 = prep.get_dataframe(fc1)
df1 = prep.calcIds(df1, CONFIDENCE)

df0.xpos = df0.xpos + xmax
df1.xpos = df1.xpos + xmax

side0 = pd.concat([df3, df0])
side1 = pd.concat([df2, df1])

close1 = prep.get_close_bees(side0, DISTANCE)
Example #8
def run(path_to_db, path_to_repo, conf, start_string, time_delta, year):

    db_path = path_to_db
    conn = sqlite3.connect(db_path)
    c = conn.cursor()
    createAllTables(c)

    repo = Repository(path_to_repo)
    confidence = conf

    start = start_string
    start_dt = datetime.datetime.strptime(
        start, "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=pytz.UTC)
    start_ts = start_dt.timestamp()

    end_dt = start_dt + datetime.timedelta(hours=time_delta)
    end_ts = end_dt.timestamp()

    files = list(repo.iter_fnames(begin=start_ts, end=end_ts))
    print("Number of files: {}".format(len(files)))

    # ADD ALL THE STUFF TO THE DB
    #############################
    my_fc_id = 0
    my_frame_id = 0

    # iterate over all files / FrameContainers
    for file in files:
        print("Progress: {}/{}".format(my_fc_id + 1, len(files)))
        fc = load_frame_container(file)

        # insert one row per FrameContainer into the frame_container table
        c.execute(
            "insert into frame_container (fc_id, id, cam_id, from_ts, to_ts) values (?, ?, ?, ?, ?)",
            (my_fc_id, str(fc.id), fc.camId, fc.fromTimestamp, fc.toTimestamp))

        # iterate over all frames
        tpls = []

        for f in fc.frames:
            # insert one row per frame into the frame table
            c.execute(
                "insert into frame (frame_id, fc_id, timestamp) values (?, ?, ?)",
                (my_frame_id, my_fc_id, f.timestamp))

            # iterate over all detections
            for d in f.detectionsUnion.detectionsDP:
                d = Detection(my_frame_id, d.xpos, d.ypos, d.zRotation,
                              list(d.decodedId))
                tpls.append(d)

            # increment the frame counter
            my_frame_id += 1

        df = pd.DataFrame(tpls)
        df = prep.calcIds(df, confidence, year)
        df.drop('confidence', axis=1, inplace=True)

        # append the detections to the DB
        df.to_sql('DETECTIONS', conn, if_exists='append', index=False)

        # increment the FrameContainer counter
        my_fc_id += 1

    conn.commit()
    conn.close()
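
A hedged example of how run might be invoked; every argument below is a placeholder, and the start string only has to match the "%Y-%m-%dT%H:%M:%SZ" format parsed above:

run("detections.db",      # SQLite file to write to (placeholder)
    "path/to/repo",       # recording repository path (placeholder)
    conf=0.95,
    start_string="2016-08-01T00:00:00Z",
    time_delta=6,         # hours of recordings to ingest
    year=2016)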