Example 1
def run(gt_file,
        videos,
        images,
        visualize_debug,
        output,
        fix_utc_2014,
        nb_bits=12):
    """
    Converts bb_binary ground truth Cap'n Proto files to hdf5 files and
    extracts the corresponding rois from videos or images.
    """
    def get_filenames(f):
        if f is None:
            return []
        else:
            return [line.rstrip('\n') for line in f.readlines()]

    gen_factory = FrameGeneratorFactory(get_filenames(videos),
                                        get_filenames(images))
    if os.path.exists(output):
        os.remove(output)

    distribution = DistributionCollection([('bits', Bernoulli(), nb_bits)])
    dset = DistributionHDF5Dataset(output, distribution)
    camIdxs = []
    periods = []
    for fname in gt_file:
        fc = load_frame_container(fname)
        camIdx, start_dt, end_dt = parse_video_fname(fname)
        if fix_utc_2014 and start_dt.year == 2014:
            start_dt -= timedelta(hours=2)
        gt_frames = []
        gen = gen_factory.get_generator(camIdx, start_dt)
        for frame, (video_frame, video_filename) in zip(fc.frames, gen):
            gt = {}
            np_frame = convert_frame_to_numpy(frame)
            rois, mask, positions = extract_gt_rois(np_frame, video_frame,
                                                    start_dt)
            for name in np_frame.dtype.names:
                gt[name] = np_frame[name][mask]
            bits = [int_id_to_binary(tag_id)[::-1] for tag_id in gt["decodedId"]]
            gt["bits"] = 2 * np.array(bits, dtype=np.float32) - 1  # np.float was removed from NumPy
            gt["tags"] = 2 * (rois / 255.).astype(np.float16) - 1
            gt['filename'] = os.path.basename(video_filename)
            gt['camIdx'] = camIdx
            gt_frames.append(gt)
            print('.', end='', flush=True)
        print()
        gt_period = GTPeriod(camIdx, start_dt, end_dt, fname, gt_frames)

        periods.append(
            [int(gt_period.start.timestamp()),
             int(gt_period.end.timestamp())])
        camIdxs.append(gt_period.camIdx)
        append_gt_to_hdf5(gt_period, dset)

    dset.attrs['periods'] = np.array(periods)
    dset.attrs['camIdxs'] = np.array(camIdxs)
    visualize_detection_tiles(dset, os.path.splitext(output)[0])
    dset.close()
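A note on the scaling above: `2 * x - 1` maps the Bernoulli bit labels from {0, 1} to {-1, 1} and the ROI pixels from [0, 255] to roughly [-1, 1]. A minimal self-contained sketch of both transforms (NumPy only; `int_id_to_bits` is a hypothetical stand-in for the project's `int_id_to_binary`):

import numpy as np

def int_id_to_bits(tag_id, nb_bits=12):
    # hypothetical stand-in: little-endian bit vector of a decoded tag id
    bits = np.array([(tag_id >> i) & 1 for i in range(nb_bits)], dtype=np.float32)
    return 2 * bits - 1  # {0, 1} -> {-1, 1}

def normalize_roi(roi_uint8):
    # scale uint8 pixels from [0, 255] to [-1, 1]
    return 2 * (roi_uint8 / 255.).astype(np.float16) - 1

print(int_id_to_bits(0b101000000001))
print(normalize_roi(np.array([0, 128, 255], dtype=np.uint8)))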
Example 2
def generate_network(index, file_list, confidence, distance, ilen):

    xmax = 3000

    # one dataframe per camera
    dataframes = np.empty(4, dtype=object)

    for i in range(4):
        fc = load_frame_container(file_list[i])
        df = prep.get_dataframe2(fc)
        df = prep.calcIds(df, confidence)

        camIdx = int(file_list[i].split("/")[-1].split("_")[1])
        dataframes[camIdx] = df

    # shift cam 0 and cam 1 to the right
    dataframes[0].xpos = dataframes[0].xpos + xmax
    dataframes[1].xpos = dataframes[1].xpos + xmax

    # merge the two comb sides
    side0 = pd.concat([dataframes[3], dataframes[0]])
    side1 = pd.concat([dataframes[2], dataframes[1]])

    close1 = prep.get_close_bees_ckd(side0, distance)
    close2 = prep.get_close_bees_ckd(side1, distance)

    close = pd.concat([close1, close2])

    p = prep.bee_pairs_to_timeseries(close)

    return prep.extract_interactions(p, ilen)
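The coordinate shift above stitches the four cameras into two comb sides: cams 3 and 0 cover one side, cams 2 and 1 the other, and the right-hand camera of each pair is moved by `xmax` so both halves share one x axis. A toy sketch of that stitch with dummy dataframes (column names follow the snippet; the data is made up):

import numpy as np
import pandas as pd

xmax = 3000
# dummy per-camera detections, 5 rows each
cams = {i: pd.DataFrame({"xpos": np.random.randint(0, xmax, 5),
                         "ypos": np.random.randint(0, 4000, 5)})
        for i in range(4)}

# shift the right-hand cameras into the shared per-side coordinate system
cams[0]["xpos"] += xmax
cams[1]["xpos"] += xmax

side0 = pd.concat([cams[3], cams[0]], ignore_index=True)  # x now spans [0, 2 * xmax)
side1 = pd.concat([cams[2], cams[1]], ignore_index=True)
print(side0["xpos"].min(), side0["xpos"].max())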
Example 3
def getIDs(m, d, h, files, conf, year):
    # one counter per possible 12-bit tag id
    a = np.zeros(2**12, dtype='int32')

    for f in files:
        fc = load_frame_container(f)
        df = get_dataframe2(fc)
        df = calcIds(df, conf, year)
        ids = list(df.id)
        for i in ids:
            a[i] += 1
    return (m, d, h, a)
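`getIDs` returns one histogram over all 2**12 possible tag ids for a single (month, day, hour) bucket, so detection counts over longer windows are just the element-wise sum of several buckets. A usage sketch with stand-in data (the Poisson counts only stand in for real `getIDs` output):

import numpy as np

# pretend getIDs ran for three hours of one day
results = [(8, 1, h, np.random.poisson(2, 2**12)) for h in range(3)]
total = np.sum([a for (_m, _d, _h, a) in results], axis=0)

top_ids = np.argsort(total)[::-1][:5]
print("most frequently detected ids:", top_ids, total[top_ids])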
Example 4
    def __init__(self):

        self.frames = None
        self.truth = None
        self.tracked = None

        if not os.path.exists(config.DATA_FOLDER):
            print('Error: folder not found')
            return

        repo = Repository.load(config.DATA_FOLDER)
        start_time = datetime(config.DATE[0],
                              config.DATE[1],
                              config.DATE[2],
                              config.TIME[0],
                              config.TIME[1],
                              tzinfo=pytz.utc)
        # note: assumes config.TIME[1] < 59, otherwise minute + 1 overflows
        end_time = datetime(config.DATE[0],
                            config.DATE[1],
                            config.DATE[2],
                            config.TIME[0],
                            config.TIME[1] + 1,
                            tzinfo=pytz.utc)

        fnames = repo.iter_fnames(begin=start_time, end=end_time)
        for fname in fnames:

            frame_container = load_frame_container(fname)

            cam = frame_container.camId
            self.frames = list(frame_container.frames)

            # break because we only load the first fname
            break

        if not os.path.exists(config.TRACKED_PATHS_FILE):
            print('Error: file not found')
            return

        with open(config.TRACKED_PATHS_FILE, 'rb') as tracked_paths_file:
            tracked_input = pickle.load(tracked_paths_file)
            self.tracked = tracked_input['paths']

        if not os.path.exists(config.TRUTH_PATHS_FILE):
            print('Error: file not found')
            return

        with open(config.TRUTH_PATHS_FILE, 'rb') as truth_paths_file:
            truth_input = pickle.load(truth_paths_file)
            self.truth = truth_input['paths']
Example 5
def run(bb_gt_files, video_dir, image_dir, visualize_debug, force, output):
    """
    Converts bb_binary ground truth Cap'n Proto files to hdf5 files and
    extracts the corresponding rois from videos or images.
    """
    gen_factory = FrameGeneratorFactory(video_dir, image_dir)
    if force and os.path.exists(output):
        os.remove(output)
    dset = HDF5Dataset(output)
    camIdxs = []
    periods = []
    for fname in bb_gt_files:
        fc = load_frame_container(fname)
        camIdx, start_dt, end_dt = parse_video_fname(fname)
        basename = os.path.basename(fname)
        gt_frames = []
        print(basename)
        gen = gen_factory.get_generator(camIdx, start_dt)
        first = True
        for frame, (video_frame, video_filename) in zip(fc.frames, gen):
            gt = {}
            np_frame = convert_frame_to_numpy(frame)
            rois, mask, positions = extract_gt_rois(np_frame, video_frame,
                                                    start_dt)
            for name in np_frame.dtype.names:
                gt[name] = np_frame[name][mask]
            gt["bits"] = np.array(
                [int_id_to_binary(id)[::-1] for id in gt["decodedId"]])
            gt["tags"] = rois
            gt['filename'] = os.path.basename(video_filename)
            gt_frames.append(gt)
            if first and visualize_debug:
                visualize_detections(gt, positions, video_frame)
                first = False

            print('.', end='')
        gt_period = GTPeriod(camIdx, start_dt, end_dt, fname, gt_frames)

        periods.append(
            [int(gt_period.start.timestamp()),
             int(gt_period.end.timestamp())])
        camIdxs.append(gt_period.camIdx)
        append_gt_to_hdf5(gt_period, dset)

    dset.attrs['periods'] = np.array(periods)
    dset.attrs['camIdxs'] = np.array(camIdxs)
    dset.close()
Example 6
def generate_networks(index,
                      file_list,
                      confidence=.95,
                      distance=160,
                      ilen=3,
                      window_size=256):
    print("process {} - start".format(index))

    xmax = 3000

    # list of networks
    network_list = []

    # one df per cam
    dataframes = np.empty(4, dtype=object)

    for i in range(4):
        fc = load_frame_container(file_list[i])
        df = prep.get_dataframe2(fc)
        df = prep.calcIds(df, confidence)

        camIdx = int(file_list[i].split("/")[-1].split("_")[1])
        dataframes[camIdx] = df

    # shift cam 0 and cam 1 to the right
    dataframes[0].xpos = dataframes[0].xpos + xmax
    dataframes[1].xpos = dataframes[1].xpos + xmax

    # merge the two comb sides
    side0 = pd.concat([dataframes[3], dataframes[0]])
    side1 = pd.concat([dataframes[2], dataframes[1]])

    close1 = prep.get_close_bees_ckd(side0, distance)
    close2 = prep.get_close_bees_ckd(side1, distance)

    close = pd.concat([close1, close2])

    p = prep.bee_pairs_to_timeseries(close)

    for w in range(1024 // window_size):
        part = p.iloc[:, window_size * w:window_size * (w + 1)]  # DataFrame.ix was removed from pandas
        edges = prep.extract_interactions(part, ilen)
        g = prep.create_graph2(edges)
        network_list.append(((index * 1024) + (w * window_size), g))

    print("process {} - end - {}".format(index, len(network_list)))
    return network_list
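Each call processes a chunk of 1024 frames, and the pair timeseries is cut into `1024 // window_size` non-overlapping windows; the first element of each returned tuple is the window's global frame offset, `index * 1024 + w * window_size`. The index arithmetic in isolation (a minimal sketch):

window_size = 256
index = 7  # chunk number; every chunk spans 1024 frames

for w in range(1024 // window_size):
    lo, hi = window_size * w, window_size * (w + 1)  # column slice within the chunk
    global_start = index * 1024 + w * window_size    # first frame of this window
    print("window {}: columns [{}, {}), global frame {}".format(w, lo, hi, global_start))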
Example 7
def generate_network(index, file, confidence, ilens, distances):

    fc = load_frame_container(file)
    df = prep.get_dataframe2(fc)
    df = prep.calcIds(df, confidence)

    result = []

    for distance in distances:
        close = prep.get_close_bees_ckd(df, distance)
        p = prep.bee_pairs_to_timeseries(close)

        for ilen in ilens:
            r = prep.extract_interactions(p, ilen)
            result.append((ilen, distance, r))

    return result
Example 8
    def handle(self, *args, **options):
        repo = Repository(options['repo_path'])
        fnames = list(repo.iter_fnames())
        for fn in try_tqdm(fnames):
            fc = load_frame_container(fn)
            fco = FrameContainer(fc_id=fc.id,
                                 fc_path=fn,
                                 video_name=fc.dataSources[0].filename)
            fco.save()

            with transaction.atomic():
                for frame in fc.frames:
                    f = Frame(fc=fco,
                              frame_id=frame.id,
                              index=frame.frameIdx,
                              timestamp=frame.timestamp)
                    f.save()


# start with python manage.py make_db_repo [repo_path]
Example 9
    def __init__(self):

        self.frames = None
        self.source = None

        if not os.path.exists(config.DATA_FOLDER):
            print('Error: folder not found')
            return

        try:

            repo = Repository.load(config.DATA_FOLDER)
            start_time = datetime(config.DATE[0],
                                  config.DATE[1],
                                  config.DATE[2],
                                  config.TIME[0],
                                  config.TIME[1],
                                  tzinfo=pytz.utc)
            end_time = datetime(config.DATE[0],
                                config.DATE[1],
                                config.DATE[2],
                                config.TIME[0],
                                config.TIME[1] + 1,
                                tzinfo=pytz.utc)

            fnames = repo.iter_fnames(begin=start_time, end=end_time)
            for fname in fnames:

                frame_container = load_frame_container(fname)

                cam = frame_container.camId
                self.frames = list(frame_container.frames)
                self.source = frame_container.dataSources[0].filename

                # break because we only load the first fname
                break

        except Exception:
            # leave frames/source as None if the repository cannot be read
            pass
Example 10
    def get_binary(self):
        return load_frame_container(self.fc_path)
Example 11
def get_fc(path, camId):
    repo = Repository(path)
    file = list(repo.iter_fnames(cam=camId))[0]
    fc = load_frame_container(file)
    return fc
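`get_fc` simply grabs the first file recorded by one camera; iterating the returned container yields frames and their detections. A hedged usage sketch (the field names follow the bb_binary Cap'n Proto schema as used elsewhere on this page; the repository path is hypothetical):

fc = get_fc("/path/to/repo", camId=0)
print(fc.camId, fc.fromTimestamp, fc.toTimestamp)

for frame in fc.frames:
    print(frame.frameIdx, frame.timestamp,
          len(frame.detectionsUnion.detectionsDP))
    break  # first frame is enough for a smoke test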
Example 12
def create_frame_metadata_table(repository_path,
                                host,
                                user,
                                password,
                                database="beesbook",
                                tablename_suffix="",
                                progress="tqdm"):
    """Reads a bb_binary.Repository and puts all the frame IDs, frame containers and their metadata (e.g. global index for frames)
    into two new database tables.

    Arguments:
        repository_path: string
            Path to a bb_binary.Repository.
        host, user, password, database: string
            Credentials for the database server.
        tablename_suffix: string
            Suffix for the table names (e.g. "2019_berlin").
        progress: string ("tqdm"/"tqdm_notebook") or callable
            Optional. Used to display the import progress.
    """
    from collections import defaultdict
    import datetime

    import bb_binary
    import psycopg2
    import psycopg2.extras
    import pytz

    repo = bb_binary.Repository(repository_path)

    cam_id_indices = defaultdict(int)

    if progress is not None:
        if progress == "tqdm":
            import tqdm
            progress = tqdm.tqdm
        elif progress == "tqdm_notebook":
            import tqdm
            progress = tqdm.tqdm_notebook
    else:
        progress = lambda x: x

    with psycopg2.connect(host=host,
                          user=user,
                          password=password,
                          database=database) as con:
        cursor = con.cursor()

        # table names cannot be bound as SQL parameters, so tablename_suffix must be trusted input
        framecontainer_tablename = "bb_framecontainer_metadata_" + tablename_suffix
        cursor.execute("""
        CREATE TABLE IF NOT EXISTS {} (
            fc_id numeric(32,0) NOT NULL,
            fc_path text NOT NULL,
            video_name text NOT NULL
        );

        """.format(framecontainer_tablename))

        frame_tablename = "bb_frame_metadata_" + tablename_suffix
        cursor.execute("""
        CREATE TABLE IF NOT EXISTS {} (
            frame_id numeric(32,0) NOT NULL,
            frame_number bigint NOT NULL,
            cam_id smallint NOT NULL,
            index integer NOT NULL,
            fc_id numeric(32,0) NOT NULL,
            "timestamp" double precision NOT NULL,
            "datetime" timestamp with time zone NOT NULL
        );

        """.format(frame_tablename))

        framecontainer_statement = """
                       INSERT INTO {} (fc_id, fc_path, video_name) VALUES %s
                        """.format(framecontainer_tablename)
        frame_statement = """
                       INSERT INTO {} (frame_id, frame_number, cam_id,
                           index, fc_id, "timestamp", "datetime") VALUES %s
                        """.format(frame_tablename)

        def commit_batch(batch, statement):
            if len(batch) == 0:
                return
            psycopg2.extras.execute_values(cursor,
                                           statement,
                                           batch,
                                           page_size=200)
            del batch[:]

        frame_batch = []

        def commit_frame_batch():
            commit_batch(frame_batch, frame_statement)

        framecontainer_batch = []

        def commit_framecontainer_batch():
            commit_batch(framecontainer_batch, framecontainer_statement)

        for fc_path in progress(repo.iter_fnames()):
            fc = bb_binary.load_frame_container(fc_path)

            fc_id = fc.id
            cam_id = fc.camId
            video_path = fc.dataSources[0].filename

            next_fc_frame_number = cam_id_indices[cam_id]

            for frame in fc.frames:
                frame_id = frame.id
                frame_timestamp = frame.timestamp
                frame_datetime = datetime.datetime.utcfromtimestamp(
                    frame_timestamp)
                frame_datetime = pytz.utc.localize(frame_datetime)
                frame_index = frame.frameIdx

                frame_batch.append(
                    (frame_id, next_fc_frame_number, cam_id, frame_index,
                     fc_id, frame_timestamp, frame_datetime))

                next_fc_frame_number += 1

                if len(frame_batch) > 2000:
                    commit_frame_batch()

            cam_id_indices[cam_id] = next_fc_frame_number
            commit_frame_batch()

            framecontainer_batch.append((fc_id, fc_path, video_path))
            if len(framecontainer_batch) > 100:
                commit_framecontainer_batch()

        commit_framecontainer_batch()
    con.close()
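Once both metadata tables are filled, frame lookups by camera and time range are plain SQL; adding indexes on `frame_id`, `cam_id` and `timestamp` is worthwhile for large repositories. A hedged query sketch against the tables created above (credentials are placeholders; the "2019_berlin" suffix is the docstring's example):

import psycopg2

with psycopg2.connect(host="localhost", user="beesbook",
                      password="...", database="beesbook") as con:
    cursor = con.cursor()
    cursor.execute("""
        SELECT f.frame_id, f."datetime", fc.video_name
        FROM bb_frame_metadata_2019_berlin f
        JOIN bb_framecontainer_metadata_2019_berlin fc USING (fc_id)
        WHERE f.cam_id = %s AND f."datetime" BETWEEN %s AND %s
        ORDER BY f."datetime"
        LIMIT 10;
        """, (0, '2019-08-01 12:00+00', '2019-08-01 13:00+00'))
    for row in cursor.fetchall():
        print(row)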
Example 13
def main():

	# loading data
	if not os.path.exists( config.DATA_FOLDER ):
		print( 'Error: folder not found' )
		return

	dset_store = ds.DetectionSetStore()

	repo = Repository.load( config.DATA_FOLDER )
	start_time = datetime(
		config.DATE[ 0 ], config.DATE[ 1 ], config.DATE[ 2 ],
		config.TIME[ 0 ], config.TIME[ 1 ],
		tzinfo=pytz.utc
	)
	end_time = datetime(
		config.DATE[ 0 ], config.DATE[ 1 ], config.DATE[ 2 ],
		config.TIME[ 0 ], config.TIME[ 1 ]+1,
		tzinfo=pytz.utc
	)

	fnames = repo.iter_fnames( begin=start_time, end=end_time )
	for fname in fnames:

		frame_container = load_frame_container( fname )

		cam = frame_container.camId
		dset_store.source = frame_container.dataSources[ 0 ].filename

		previous_timestamp = None
		frame_index = config.FRAME_START

		for frame in list( frame_container.frames )[ config.FRAME_START : config.FRAME_END + 1 ]:

			timestamp = ds.TimeStamp( frame_index, cam )
			timestamp.connect_with_previous( previous_timestamp )
			previous_timestamp = timestamp

			dset = ds.DetectionSet()
			dset_store.store[ timestamp ] = dset

			data = convert_frame_to_numpy( frame )

			for detection_data in data:

				dset.add_detection( ds.Detection(
					detection_data[ 'idx' ],
					timestamp,
					np.array( [ detection_data[ 'ypos' ], detection_data[ 'xpos' ] ] ),  # rotated, otherwise will be portrait orientation
					detection_data[ 'localizerSaliency' ],
					detection_data[ 'decodedId' ][::-1]  # reversed, we want least significant bit last
				) )

			dset.build_kd_tree()

			frame_index += 1

		# break because we only load the first fname
		break


	# loading truth
	if not os.path.isfile( config.PATHS_FILE ):
		print( 'Error: file not found' )
		return

	with open( config.PATHS_FILE, 'rb' ) as paths_file:
		paths_data = pickle.load( paths_file )  # renamed from 'input' to avoid shadowing the builtin

	if paths_data[ 'source' ] != dset_store.source:
		print( 'Error: data sources do not match' )
		return

	paths_input = paths_data[ 'paths' ]


	# match
	for tag_id in paths_input.keys():

		for path_id in paths_input[ tag_id ].keys():

			for frame,detection_data in paths_input[ tag_id ][ path_id ].items():

				old_detection_id, pos_x, pos_y, readability = detection_data
				timestamp = dset_store.get_timestamp( frame )

				new_detection_id = None
				distance = None

				if timestamp is not None and readability < 3:

					dset = dset_store.get( timestamp )
					distances, indices = dset.kd_tree.query( [ pos_x, pos_y ], k=1 )
					distance = distances[ 0 ][ 0 ]
					index = indices[ 0 ][ 0 ]

					if distance <= MATCH_DISTANCE_LIMIT:
						new_detection_id = index

				# use this if you're matching to the same output for test purposes:
				#if new_detection_id	!= old_detection_id:
				#	print 'mismatch old: ' + str(old_detection_id) + ', new: ' + str(new_detection_id)

				paths_input[ tag_id ][ path_id ][ frame ] = ( new_detection_id, pos_x, pos_y, readability )


	# saving truth
	with open( config.PATHS_FILE, 'wb' ) as paths_file:
		pickle.dump( paths_data, paths_file )


	print( 'done' )
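The matching step above is a nearest-neighbour query: for every ground-truth position, the closest detection within MATCH_DISTANCE_LIMIT is accepted. The `ds.DetectionSet` kd-tree is project code, but the same idea expressed with SciPy's cKDTree looks like this (an illustrative sketch, not the project's implementation; the limit value is assumed):

import numpy as np
from scipy.spatial import cKDTree

MATCH_DISTANCE_LIMIT = 20.0  # pixels, assumed for this sketch

detections = np.array([[10.0, 12.0], [300.0, 40.0], [55.0, 60.0]])  # (x, y) positions
tree = cKDTree(detections)

distance, index = tree.query([12.0, 11.0], k=1)  # scalars for a single query point
matched = index if distance <= MATCH_DISTANCE_LIMIT else None
print(distance, matched)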
Example 14
    a = [f.split('/')[-1].split("_")[1] for f in file]
    l = len(a) // 4  # integer division; len(file) must be a multiple of 4
    npa = np.array(file).reshape(l, 4)
    return npa


files = get_files(path)

interactions = Series()

for file_list in files:

    dataframes = np.empty(4, dtype=object)

    for i in list(range(4)):
        fc = load_frame_container(file_list[i])
        df = prep.get_dataframe(fc)
        df = prep.calcIds(df, CONFIDENCE)

        camIdx = int(file_list[i].split("/")[-1].split("_")[1])
        dataframes[camIdx] = df

    # shift cam 0 and cam 1 to the right
    dataframes[0].xpos = dataframes[0].xpos + xmax
    dataframes[1].xpos = dataframes[1].xpos + xmax

    # merge the two comb sides
    side0 = pd.concat([dataframes[3], dataframes[0]])
    side1 = pd.concat([dataframes[2], dataframes[1]])

    close1 = prep.get_close_bees(side0, DISTANCE)
Example 15
	def load_data( self ):

		if not os.path.exists( config.DATA_FOLDER ):
			print( 'Error: folder not found' )
			return

		self.block_inputs( True )

		self.dset_store = ds.DetectionSetStore()
		self.path_manager = None
		self.paths_load_progress.setValue( 0 )
		self.paths_load_label.setText( '' )


		try:
			repo = Repository( config.DATA_FOLDER )
			start_time = datetime(
				config.DATE[ 0 ], config.DATE[ 1 ], config.DATE[ 2 ],
				config.TIME[ 0 ], config.TIME[ 1 ],
				tzinfo=pytz.utc
			)

			fnames = repo.iter_fnames( begin=start_time )
			for fname in fnames:

				frame_container = load_frame_container( fname )

				cam = frame_container.camId
				#frame_container.fromTimestamp              # already available
				#frame_container.toTimestamp                # already available

				self.dset_store.source = frame_container.dataSources[ 0 ].filename

				previous_timestamp = None

				self.data_load_progress.setMaximum( config.FRAME_END + 1 - config.FRAME_START )
				self.app.processEvents()

				frame_index = config.FRAME_START

				for frame in list( frame_container.frames )[ config.FRAME_START : config.FRAME_END + 1 ]:

					#timestamp = frame.timestamp  # not included yet
					#frame.id                     # not included yet

					timestamp = ds.TimeStamp( frame_index, cam )
					timestamp.connect_with_previous( previous_timestamp )
					previous_timestamp = timestamp

					dset = ds.DetectionSet()
					self.dset_store.store[ timestamp ] = dset

					data = convert_frame_to_numpy( frame )

					for detection_data in data:

						dset.add_detection( ds.Detection(
							detection_data[ 'idx' ],
							timestamp,
							np.array( [ detection_data[ 'ypos' ], detection_data[ 'xpos' ] ] ),  # rotated, otherwise will be portrait orientation
							detection_data[ 'localizerSaliency' ],
							detection_data[ 'decodedId' ][::-1]  # reversed, we want least significant bit last
						) )

					frame_index += 1

					self.data_load_progress.setValue( frame_index - config.FRAME_START )
					self.app.processEvents()

				self.data_load_label.setText( str( len( self.dset_store.store ) ) + ' frames loaded' )
				self.app.processEvents()

				# break because we only load the first fname
				break

		except Exception:

			# loading failed; inputs are unblocked again below
			pass

		self.block_inputs( False )
Example 16
def run(path_to_db, path_to_repo, conf, start_string, time_delta, year):

    db_path = path_to_db
    conn = sqlite3.connect(db_path)
    c = conn.cursor()
    createAllTables(c)

    repo = Repository(path_to_repo)
    confidence = conf

    start = start_string
    start_dt = datetime.datetime.strptime(
        start, "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=pytz.UTC)
    start_ts = start_dt.timestamp()

    end_dt = start_dt + datetime.timedelta(hours=time_delta)
    end_ts = end_dt.timestamp()

    files = list(repo.iter_fnames(begin=start_ts, end=end_ts))
    print("Number of files: {}".format(len(files)))

    # ADD ALL THE STUFF TO THE DB
    #############################
    my_fc_id = 0
    my_frame_id = 0

    # iterate over all files / FrameContainers
    for file in files:
        print("Progress: {}/{}".format(my_fc_id + 1, len(files)))
        fc = load_frame_container(file)

        # one row per FrameContainer in the frame_container table
        c.execute(
            "insert into frame_container (fc_id, id, cam_id, from_ts, to_ts) values (?, ?, ?, ?, ?)",
            (my_fc_id, str(fc.id), fc.camId, fc.fromTimestamp, fc.toTimestamp))

        # iterate over all frames
        tpls = []

        for f in fc.frames:
            # one row per frame in the frame table
            c.execute(
                "insert into frame (frame_id, fc_id, timestamp) values (?, ?, ?)",
                (my_frame_id, my_fc_id, f.timestamp))

            # iterate over all detections
            for d in f.detectionsUnion.detectionsDP:
                det = Detection(my_frame_id, d.xpos, d.ypos, d.zRotation,
                                list(d.decodedId))
                tpls.append(det)

            # increment the frame counter
            my_frame_id += 1

        df = pd.DataFrame(tpls)
        df = prep.calcIds(df, confidence, year)
        df.drop('confidence', axis=1, inplace=True)

        # add the detections to the db
        df.to_sql('DETECTIONS', conn, if_exists='append', index=False)

        # increment the container counter
        my_fc_id += 1

    conn.commit()
    conn.close()
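With the three tables filled, cross-table statistics reduce to SQL joins. A hedged sketch against the schema implied by the INSERT statements above (the database path is hypothetical; DETECTIONS columns depend on `prep.calcIds` and are not queried here):

import sqlite3

conn = sqlite3.connect("beesbook.db")  # hypothetical path
c = conn.cursor()

# frames per camera, via each frame's container
c.execute("""
    SELECT fc.cam_id, COUNT(f.frame_id)
    FROM frame f
    JOIN frame_container fc ON f.fc_id = fc.fc_id
    GROUP BY fc.cam_id;
    """)
print(c.fetchall())
conn.close()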