Example #1
  def __init__(self, report_file_path, class_names_file_path,
               smooth_probs=False, smoothing_factor=16):
    class_name_map = IO.read_class_names(class_names_file_path)

    class_header_names = [class_name + '_probability'
                          for class_name in class_name_map.values()]

    header_mask = ['frame_number', 'frame_timestamp', 'qa_flag']
    header_mask.extend(class_header_names)

    report_header, report_data, data_col_range = IO.read_report(
      report_file_path, frame_col_num=1, timestamp_col_num=2, qa_flag_col_num=3,
      header_mask=header_mask, return_data_col_range=True)

    report_frame_numbers = report_data['frame_numbers']
    report_frame_numbers = report_frame_numbers.astype(np.int32)

    try:
      report_timestamps = report_data['frame_timestamps']
      report_timestamps = report_timestamps.astype(np.int32)
      qa_flags = report_data['qa_flag']
    except KeyError:
      # timestamps and QA flags are optional columns in the report
      report_timestamps = None
      qa_flags = None

    report_probs = report_data['probabilities']
    report_probs = report_probs.astype(np.float32)

    if smooth_probs:
      report_probs = IO.smooth_probs(report_probs, smoothing_factor)

    Trip.__init__(self, report_frame_numbers, report_timestamps, qa_flags,
                  report_probs, class_name_map)
Example #2
def main():

    in_arg = IO.get_input_args(train=False)

    device = IO.get_device(in_arg.gpu)

    category_names = IO.get_label_mapping(in_arg.category_names)

    checkpoint = IO.load_checkpoint(in_arg.checkpoint)

    classifier = Classifier.generate_classifier_by_checkpoint(
        checkpoint, in_arg.arch, device)

    image = IO.load_image(in_arg.image_path)

    image_tensor = ImageUtils.get_image_tensor(image)

    probs, classes = classifier.predict(image_tensor, in_arg.top_k,
                                        checkpoint['class_to_idx'])

    labels = [category_names[cls] for cls in classes]

    print(probs)
    print(classes)
    print(labels)
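
The in_arg attributes referenced above (gpu, category_names, checkpoint, image_path, top_k, and, in the training example below, arch, hidden_units, learning_rate, epochs, save_dir, data_directory) suggest that IO.get_input_args wraps argparse. A minimal sketch of such a helper follows; the flag names, defaults, and the argparse approach itself are assumptions made for illustration, not the project's actual implementation:

import argparse

def get_input_args(train=False):
    # Hypothetical sketch: flag names mirror the attributes used in the
    # examples above and below; all names and defaults are assumptions.
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', action='store_true',
                        help='use the GPU if one is available')
    if train:
        parser.add_argument('data_directory')
        parser.add_argument('--arch', default='vgg16')
        parser.add_argument('--hidden_units', type=int, default=512)
        parser.add_argument('--learning_rate', type=float, default=0.001)
        parser.add_argument('--epochs', type=int, default=5)
        parser.add_argument('--save_dir', default='.')
    else:
        parser.add_argument('image_path')
        parser.add_argument('checkpoint')
        parser.add_argument('--top_k', type=int, default=5)
        parser.add_argument('--category_names', default='cat_to_name.json')
        parser.add_argument('--arch', default='vgg16')
    return parser.parse_args()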
Example #3
def main():

    in_arg = IO.get_input_args(train=True)

    device = IO.get_device(in_arg.gpu)

    dataloaders, class_to_idx = IO.get_image_data(in_arg.data_directory)

    classifier = Classifier(arch=in_arg.arch,
                            hidden_units=in_arg.hidden_units,
                            output_units=102,
                            learning_rate=in_arg.learning_rate,
                            epochs=in_arg.epochs,
                            device=device)

    classifier.train_model(dataloaders['trainloader'],
                           dataloaders['validloader'])

    trained_classifier = classifier.get_trained_classifier()

    checkpoint = {
        'classifier': trained_classifier['classifier'],
        'state_dict': trained_classifier['state_dict'],
        'learning_rate': trained_classifier['learning_rate'],
        'epochs': trained_classifier['epochs'],
        'class_to_idx': class_to_idx
    }

    IO.save_checkpoint(checkpoint, in_arg.save_dir)
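
The checkpoint dictionary assembled above is handed to IO.save_checkpoint and read back by IO.load_checkpoint in Example #2. A minimal sketch of what such a save/load pair could look like, assuming the project uses PyTorch and a hypothetical file name:

import os
import torch

def save_checkpoint(checkpoint, save_dir, file_name='checkpoint.pth'):
    # Hypothetical sketch: persist the checkpoint dict built in main() above.
    os.makedirs(save_dir, exist_ok=True)
    torch.save(checkpoint, os.path.join(save_dir, file_name))

def load_checkpoint(checkpoint_path):
    # Counterpart used in Example #2; map_location keeps CPU-only hosts working.
    return torch.load(checkpoint_path, map_location='cpu')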
Example #4
File: model.py Project: iyush/manta
    def __init__(self,
                 dim=2,
                 model_path=None,
                 input_size=13,
                 depth=3,
                 checkpoint=True):
        self.model_exists = False
        self.checkpoint = checkpoint

        self.model_path = str(model_path)
        self.use_cache = True
        self.epochs = 600

        print(self.model_path)

        self.model_name = "{}-epochs-{}-layers-sigmoid-3D-{}-inputSize-model".format(
            self.epochs, depth, input_size)

        if model_path is None:
            self.model_path = os.path.dirname(
                __file__) + "/../cache/model/" + datetime.datetime.now(
                ).strftime("%Y%m%d-%H%M%S") + "-" + self.model_name
            self.use_cache = False

        self.optimizer = keras.optimizers.Adam(learning_rate=0.002)
        self.loss_fn = keras.losses.MeanSquaredError()
        self.validation_metric = keras.metrics.MeanSquaredError()

        # tensorboard initialization
        logs_path = os.path.dirname(
            __file__) + "/../cache/logs/fit/" + datetime.datetime.now(
            ).strftime("%Y%m%d-%H%M%S") + "-" + self.model_name
        self.train_summary_writer = tf.summary.create_file_writer(logs_path +
                                                                  "/train")
        self.val_summary_writer = tf.summary.create_file_writer(logs_path +
                                                                "/validation")

        # model definition
        self.model = keras.Sequential()
        self.model.add(keras.Input(input_size))
        # hidden layers
        for i in range(depth):
            self.model.add(
                keras.layers.Dense(6, activation=keras.activations.sigmoid))

        # output
        self.model.add(keras.layers.Dense(1))

        self.model.compile(loss=self.loss_fn)

        # quick hack to check if hdf5 is in the path or not, if not we are defining a new model
        if IO.exists(self.model_path) and self.use_cache and "hdf5" in str(
                model_path):
            IO.debug("Using the checkpoint!")
            self.model_exists = True
            self.model.load_weights(model_path)
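
The train and validation summary writers created above are presumably written to from a custom training loop that is not shown here. Below is a minimal sketch of logging a validation score to TensorBoard with tf.summary; the method name and the per-epoch call pattern are assumptions:

import tensorflow as tf

def log_validation_mse(self, epoch, val_x, val_y):
    # Hypothetical helper: compute validation MSE and write it to TensorBoard.
    preds = self.model(val_x, training=False)
    mse = self.loss_fn(val_y, preds)  # keras.losses.MeanSquaredError instance
    with self.val_summary_writer.as_default():
        tf.summary.scalar('val_mse', mse, step=epoch)

With the writers pointing at logs_path + "/train" and logs_path + "/validation", TensorBoard can then be launched against logs_path to compare the two curves.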
Example #5
File: test_io.py Project: iyush/manta
    def setUp(self):
        self.fakeVel2D = np.asarray(
            [
                [1, 2, 3],
                [4, 5, 6],
                [7, 8, 9]
            ]
        )

        self.fakePressure2D = np.asarray(
            [
                [11, 22, 33],
                [44, 55, 66],
                [77, 88, 99],
            ]
        )

        self.fakeOccupancy2D = np.asarray(
            [
                [2, 2, 2],
                [2, 0, 2],
                [2, 2, 2],
            ]
        )
        IO.createPath(os.path.dirname(__file__) +
                      "/data_for_testing_2D/")
        np.savez(os.path.dirname(__file__) +
                 "/data_for_testing_2D/div_vel_0000", self.fakeVel2D)
        np.savez(os.path.dirname(__file__) +
                 "/data_for_testing_2D/pressure_0000", self.fakePressure2D)
        np.savez(os.path.dirname(__file__) +
                 "/data_for_testing_2D/flags_0000", self.fakeOccupancy2D)

        self.fakeVel3D = np.asarray([
            [[1,   2,  3], [4,   5,  6], [7,   8,  9]],
            [[10, 11, 12], [13, 14, 15], [16, 17, 18]],
            [[19, 20, 21], [22, 23, 24], [25, 26, 27]],
        ])
        self.fakePressure3D = np.asarray([
            [[111, 222, 333], [444, 555, 666], [777, 888, 999]],
            [[1111, 2222, 3333], [4444, 5555, 6666], [7777, 8888, 9999]],
            [[11111, 22222, 33333], [44444, 55555, 66666], [77777, 88888, 99999]],
        ])
        self.fakeOccupancy3D = np.asarray([
            [[0, 0, 0], [0, 0, 0], [0, 0, 0]],
            [[0, 0, 0], [0, 0, 0], [0, 0, 0]],
            [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
        ])
        IO.createPath(os.path.dirname(__file__) +
                      "/data_for_testing_3D/")
        np.savez(os.path.dirname(__file__) +
                 "/data_for_testing_3D/div_vel_0000", self.fakeVel3D)
        np.savez(os.path.dirname(__file__) +
                 "/data_for_testing_3D/pressure_0000", self.fakePressure3D)
        np.savez(os.path.dirname(__file__) +
                 "/data_for_testing_3D/flags_0000", self.fakeOccupancy3D)
Example #6
File: test_io.py Project: iyush/manta
    def test_fakeVel3D(self):

        io = IO(os.path.dirname(__file__) + "/data_for_testing_3D", ext="npz")
        fakeVels, fakePressures, fakeOccupancy = io.readAll()

        # use assertEqual so the second argument is actually compared
        # (assertTrue would treat it as the failure message)
        self.assertEqual(fakeVels[0].tolist(), self.fakeVel3D.tolist())
        self.assertEqual(fakePressures[0].tolist(),
                         self.fakePressure3D.tolist())
        self.assertEqual(fakeOccupancy[0].tolist(),
                         self.fakeOccupancy3D.tolist())
Example #7
def get_parser(add_help=False):
    parent_parser = IO.get_parser(add_help=False)
    parser = argparse.ArgumentParser(
        add_help=False,
        parents=[parent_parser],
        description='Graph Convolution Network for Pose Matching')
    parser.set_defaults(config='config/inference.yaml')
    return parser
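
A minimal usage sketch for the parser above (assumed, not taken from the project, and assuming the parent parser's arguments are all optional); the config default comes from the set_defaults call:

parser = get_parser()
args = parser.parse_args()
print(args.config)  # 'config/inference.yaml' unless overridden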
Example #8
    def plot_grids(grids,
                   gridType="div_vel",
                   movie_path=os.path.dirname(__file__) +
                   "/../visuals/movies"):

        IO.createPath(movie_path)

        f_name_pattern = movie_path + "/" + gridType

        for i, grid in enumerate(grids):
            Visualize.plot_grid(grid,
                                f_name_pattern + "_{:04d}".format(i) + ".png")

        # turn it into gif
        subprocess.call([
            'ffmpeg', '-i', f_name_pattern + "_%04d.png", '-y',
            f_name_pattern + ".gif"
        ])

        files_in_dir = os.listdir(movie_path)
        for item in files_in_dir:
            if item.endswith(".png") or item.endswith(".mp4"):
                os.remove(os.path.join(movie_path, item))
Example #9
    def __getitem__(self, idx):
        video = self.file_list[idx]
        frames = []
        masks = []
        opt_flows = []
        n_objects = []

        frame_indexes = self._get_frame_indexes(video['n_frames'],
                                                self.n_max_frames)
        for fi in frame_indexes:
            frame = np.array(IO.get(video['frames'][fi]).convert('RGB'))
            frames.append(np.array(frame))
            mask = IO.get(video['masks'][fi])
            mask = mask.convert('P') if mask is not None else np.zeros(
                frame.shape[:-1])
            masks.append(np.array(mask))
            opt_flow = IO.get(
                video['optical_flow'][fi]) if 'optical_flow' in video else None
            opt_flow = opt_flow if opt_flow is not None else np.zeros(
                frame.shape[:-1] + (2, ))
            opt_flows.append(np.array(opt_flow))

        # The number of objects in the masks
        mask_indexes = set()
        for m in masks:
            _mask_indexes = np.unique(m)
            _mask_indexes = _mask_indexes[_mask_indexes != self.ignore_idx]
            mask_indexes.update(_mask_indexes)
            _n_objects = min(len(mask_indexes) - 1, self.n_max_objects)
            n_objects.append(_n_objects)

        # Data preprocessing and augmentation
        if self.transforms is not None:
            frames, masks, opt_flows = self.transforms(frames, masks,
                                                       opt_flows)

        return video['name'], np.array(n_objects), frames, masks, opt_flows
Example #10
    def __getitem__(self, idx):
        sample = self.file_list[idx]
        data = {}
        rand_idx = -1
        if 'n_renderings' in self.options:
            rand_idx = random.randint(0, self.options['n_renderings'] - 1) if self.options['shuffle'] else 0

        for ri in self.options['required_items']:
            file_path = sample['%s_path' % ri]
            if isinstance(file_path, list):
                file_path = file_path[rand_idx]
            # print(file_path)
            data[ri] = IO.get(file_path).astype(np.float32)

        if self.transforms is not None:
            data = self.transforms(data)

        return sample['taxonomy_id'], sample['model_id'], data
Example #11
    def __getitem__(self, idx):
        sample = self.file_list[idx]
        data = {}
        # rand_idx = -1
        # if 'n_renderings' in self.options:
        rand_idx = random.randint(0, 8 - 1) if self.options['shuffle'] else 0
        # print('rand_idx2: ', rand_idx)

        for ri in self.options['required_items']:  # 'partial' & 'gt'
            file_path = sample['%s_path' % ri]
            if isinstance(file_path, list):
                file_path = file_path[rand_idx]
                # print('rand_idx1: ', rand_idx)

            # the return value is a list (np array) of 3D coordinates
            data[ri] = IO.get(file_path).astype(np.float32)

        # print('data\'s shape: ', data['partial_cloud'].shape)

        if self.transforms is not None:
            data = self.transforms(data)

        return sample['taxonomy_id'], sample['model_id'], data
Example #12
def process_video(video_file_path, output_dir_path, class_name_map, model_name,
                  model_signature_name, model_server_host, model_input_size,
                  return_code_queue, log_queue, log_level, ffmpeg_path,
                  ffprobe_path, do_crop, crop_width, crop_height, crop_x,
                  crop_y, do_extract_timestamps, timestamp_max_width,
                  timestamp_height, timestamp_x, timestamp_y, do_deinterlace,
                  num_channels, batch_size, do_smooth_probs, smoothing_factor,
                  do_binarize_probs, do_write_inference_reports,
                  do_write_event_reports, max_threads, processor_mode):
    configure_logger(log_level, log_queue)

    interrupt_queue = Queue()

    # Create an output subdirectory for the current mode
    output_dir_path = path.join(output_dir_path, processor_mode)

    def interrupt_handler(signal_number, _):
        logging.warning('received interrupt signal {}.'.format(signal_number))

        interrupt_queue.put_nowait('_')

        # TODO: cancel timestamp/report generation when an interrupt is signalled
        # logging.debug('instructing inference pipeline to halt.')
        # child_interrupt_queue.put_nowait('_')

    signal.signal(signal.SIGINT, interrupt_handler)

    video_file_name = path.basename(video_file_path)
    video_file_name, _ = path.splitext(video_file_name)

    logging.info('preparing to analyze {}'.format(video_file_path))

    output_files = []

    try:
        start = time()

        frame_width, frame_height, num_frames, _ = IO.get_video_dimensions(
            video_file_path, ffprobe_path)

        end = time() - start

        processing_duration = IO.get_processing_duration(
            end, 'read video dimensions in')

        logging.info(processing_duration)
    except Exception as e:
        logging.error('encountered an unexpected error while fetching video '
                      'dimensions')
        logging.error(e)

        logging.debug(
            'will exit with code: exception and value get_video_dimensions')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'get_video_dimensions'
        })
        return_code_queue.close()

        return

    try:
        do_crop = should_crop(frame_width, frame_height, do_crop, crop_width,
                              crop_height, crop_x, crop_y)
    except Exception as e:
        logging.error(e)

        logging.debug('will exit with code: exception and value should_crop')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'should_crop'
        })
        return_code_queue.close()

        return

    logging.debug('Constructing ffmpeg command')

    ffmpeg_command = [ffmpeg_path, '-i', video_file_path]

    if do_deinterlace:
        ffmpeg_command.append('-deinterlace')

    ffmpeg_command.extend([
        '-vcodec', 'rawvideo', '-pix_fmt', 'rgb24', '-vsync', 'vfr',
        '-hide_banner', '-loglevel', '0', '-f', 'image2pipe', 'pipe:1'
    ])

    try:
        do_extract_timestamps = should_extract_timestamps(
            frame_width, frame_height, do_extract_timestamps,
            timestamp_max_width, timestamp_height, timestamp_x, timestamp_y)
    except Exception as e:
        logging.error(e)

        logging.debug(
            'will exit with code: exception and value should_extract_timestamps'
        )
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'should_extract_timestamps'
        })
        return_code_queue.close()

        return

    frame_shape = [frame_height, frame_width, num_channels]

    logging.debug('FFmpeg output frame shape == {}'.format(frame_shape))
    #TODO parameterize tf serving values
    analyzer = VideoAnalyzer(frame_shape, num_frames, len(class_name_map),
                             batch_size, model_name, model_signature_name,
                             model_server_host, model_input_size,
                             do_extract_timestamps, timestamp_x, timestamp_y,
                             timestamp_height, timestamp_max_width, do_crop,
                             crop_x, crop_y, crop_width, crop_height,
                             ffmpeg_command, max_threads)

    try:
        start = time()

        num_analyzed_frames, probability_array, timestamp_array = analyzer.run(
        )

        end = time()

        analysis_duration = end - start

        processing_duration = IO.get_processing_duration(
            analysis_duration,
            'processed {} frames in'.format(num_analyzed_frames))
        logging.info(processing_duration)

        if num_analyzed_frames != num_frames:
            if interrupt_queue.empty():
                raise AssertionError('num_analyzed_frames ({}) != num_frames '
                                     '({})'.format(num_analyzed_frames,
                                                   num_frames))
            else:
                raise InterruptedError(
                    'num_analyzed_frames ({}) != num_frames '
                    '({})'.format(num_analyzed_frames, num_frames))
    except InterruptedError as ae:
        logging.error(ae)

        logging.debug(
            'will exit with code: interrupt and value: analyze_video')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'interrupt',
            'return_value': 'analyze_video'
        })
        return_code_queue.close()

        return
    except AssertionError as ae:
        logging.error(ae)

        logging.debug(
            'will exit with code: assertion error and value: analyze_video')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'assertion error',
            'return_value': 'analyze_video'
        })
        return_code_queue.close()

        return
    except Exception as e:
        logging.error(
            'encountered an unexpected error while analyzing {}'.format(
                video_file_name))
        logging.error(e)

        logging.debug(
            'will exit with code: exception and value: analyze_video')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'analyze_video'
        })
        return_code_queue.close()

        return

    logging.debug('converting timestamp images to strings')

    if do_extract_timestamps:
        try:
            start = time()

            timestamp_object = Timestamp(timestamp_height, timestamp_max_width)
            timestamp_strings, qa_flags = \
              timestamp_object.stringify_timestamps(timestamp_array)

            end = time() - start

            processing_duration = IO.get_processing_duration(
                end, 'timestamp strings converted in')

            logging.info(processing_duration)
        except Exception as e:
            logging.error('encountered an unexpected error while converting '
                          'timestamp image crops to strings')
            logging.error(e)

            logging.debug(
                'will exit with code: exception and value: stringify_timestamps'
            )
            log_queue.put(None)
            log_queue.close()

            return_code_queue.put({
                'return_code': 'exception',
                'return_value': 'stringify_timestamps'
            })
            return_code_queue.close()

            return
    else:
        timestamp_strings = None
        qa_flags = None

    logging.debug('attempting to generate reports')

    if do_write_inference_reports:
        try:
            start = time()

            inf_report = IO.write_inference_report(
                video_file_name, output_dir_path, analyzer.prob_array,
                class_name_map, timestamp_strings, qa_flags, do_smooth_probs,
                smoothing_factor, do_binarize_probs)
            output_files.append(inf_report)
            end = time() - start

            processing_duration = IO.get_processing_duration(
                end, 'generated inference reports in')
            logging.info(processing_duration)
        except Exception as e:
            logging.error(
                'encountered an unexpected error while generating inference report.'
            )
            logging.error(e)

            logging.debug(
                'will exit with code: exception and value: write_inference_report'
            )
            log_queue.put(None)
            log_queue.close()

            return_code_queue.put({
                'return_code': 'exception',
                'return_value': 'write_inference_report'
            })
            return_code_queue.close()

            return

    try:
        start = time()

        if do_smooth_probs:
            probability_array = IO.smooth_probs(probability_array,
                                                smoothing_factor)

        frame_numbers = list(range(1, len(probability_array) + 1))

        if timestamp_strings is not None:
            timestamp_strings = timestamp_strings.astype(np.int32)
        trip = Trip(frame_numbers, timestamp_strings, qa_flags,
                    probability_array, class_name_map)

        if processor_mode == "weather":
            if len(trip.feature_sequence) > 0:
                logging.info('{} weather events were found in {}'.format(
                    len(trip.feature_sequence), video_file_name))
                if do_write_event_reports:
                    weather_rep = IO.write_weather_report(
                        video_file_name, output_dir_path,
                        trip.feature_sequence)
                    output_files.append(weather_rep)
        else:
            events = trip.find_work_zone_events()

            if len(events) > 0:
                logging.info('{} work zone events were found in {}'.format(
                    len(events), video_file_name))

                if do_write_event_reports:
                    event_rep = IO.write_event_report(video_file_name,
                                                      output_dir_path, events)
                    output_files.append(event_rep)
            else:
                logging.info('No work zone events were found in {}'.format(
                    video_file_name))

        end = time() - start

        processing_duration = IO.get_processing_duration(
            end, 'generated event reports in')
        logging.info(processing_duration)
    except Exception as e:
        logging.error(
            'encountered an unexpected error while generating event report.')
        logging.error(e)

        logging.debug(
            'will exit with code: exception and value: write_event_report')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'write_event_report'
        })
        return_code_queue.close()

        return

    logging.debug('will exit with code: success and value: {}'.format(
        num_analyzed_frames))
    log_queue.put(None)
    log_queue.close()

    return_code_queue.put({
        'return_code': 'success',
        'return_value': num_analyzed_frames,
        'analysis_duration': analysis_duration,
        'output_locations': str(output_files)
    })
    return_code_queue.close()
Example #13
def process_video_signalstate(
        video_file_path, output_dir_path, class_name_map, model_name,
        model_signature_name, model_server_host, model_input_size,
        return_code_queue, log_queue, log_level, ffmpeg_path, ffprobe_path,
        do_crop, crop_width, crop_height, crop_x, crop_y,
        do_extract_timestamps, timestamp_max_width, timestamp_height,
        timestamp_x, timestamp_y, do_deinterlace, num_channels, batch_size,
        do_smooth_probs, smoothing_factor, do_binarize_probs,
        do_write_bbox_reports, do_write_event_reports, max_threads,
        processor_mode):
    configure_logger(log_level, log_queue)

    interrupt_queue = Queue()

    # Create an output subdirectory for the current mode
    output_dir_path = path.join(output_dir_path, processor_mode)
    output_files = []

    def interrupt_handler(signal_number, _):
        logging.warning('received interrupt signal {}.'.format(signal_number))

        interrupt_queue.put_nowait('_')

        # TODO: cancel timestamp/report generation when an interrupt is signalled
        # logging.debug('instructing inference pipeline to halt.')
        # child_interrupt_queue.put_nowait('_')

    signal.signal(signal.SIGINT, interrupt_handler)

    video_file_name = path.basename(video_file_path)
    video_file_name, _ = path.splitext(video_file_name)

    logging.info('preparing to signalstate analyze {}'.format(video_file_path))

    try:
        start = time()

        # For signal state, we use duration as num_frames, as we will only grab one frame per second
        frame_width, frame_height, num_frames, duration = IO.get_video_dimensions(
            video_file_path, ffprobe_path)
        num_frames = duration
        end = time() - start

        processing_duration = IO.get_processing_duration(
            end, 'read video dimensions in')

        logging.info(processing_duration)
    except Exception as e:
        logging.error('encountered an unexpected error while fetching video '
                      'dimensions')
        logging.error(e)

        logging.debug(
            'will exit with code: exception and value get_video_dimensions')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'get_video_dimensions'
        })
        return_code_queue.close()

        return

    try:
        do_crop = should_crop(frame_width, frame_height, do_crop, crop_width,
                              crop_height, crop_x, crop_y)
    except Exception as e:
        logging.error(e)

        logging.debug('will exit with code: exception and value should_crop')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'should_crop'
        })
        return_code_queue.close()

        return

    logging.debug('Constructing ffmpeg command')

    ffmpeg_command = [ffmpeg_path, '-i', video_file_path]

    if do_deinterlace:
        ffmpeg_command.append('-deinterlace')

    ffmpeg_command.extend([
        '-vcodec', 'rawvideo', '-pix_fmt', 'rgb24', '-vsync', 'vfr',
        '-hide_banner', '-loglevel', '0', '-r', '1', '-f', 'image2pipe',
        'pipe:1'
    ])

    try:
        do_extract_timestamps = should_extract_timestamps(
            frame_width, frame_height, do_extract_timestamps,
            timestamp_max_width, timestamp_height, timestamp_x, timestamp_y)
    except Exception as e:
        logging.error(e)

        logging.debug(
            'will exit with code: exception and value should_extract_timestamps'
        )
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'should_extract_timestamps'
        })
        return_code_queue.close()

        return

    frame_shape = [frame_height, frame_width, num_channels]

    logging.debug('FFmpeg output frame shape == {}'.format(frame_shape))

    analyzer = SignalVideoAnalyzer(
        frame_shape, num_frames, len(class_name_map), batch_size, model_name,
        model_signature_name, model_server_host, model_input_size,
        do_extract_timestamps, timestamp_x, timestamp_y, timestamp_height,
        timestamp_max_width, do_crop, crop_x, crop_y, crop_width, crop_height,
        ffmpeg_command, max_threads)

    try:
        start = time()

        num_analyzed_frames, frame_map_array, timestamp_array = analyzer.run()

        end = time()

        analysis_duration = end - start

        processing_duration = IO.get_processing_duration(
            analysis_duration,
            'processed {} frames in'.format(num_analyzed_frames))
        logging.info(processing_duration)

    except InterruptedError as ae:
        logging.error(ae)

        logging.debug(
            'will exit with code: interrupt and value: analyze_video')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'interrupt',
            'return_value': 'analyze_video'
        })
        return_code_queue.close()

        return
    except AssertionError as ae:
        logging.error(ae)

        logging.debug(
            'will exit with code: assertion error and value: analyze_video')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'assertion error',
            'return_value': 'analyze_video'
        })
        return_code_queue.close()

        return
    except Exception as e:
        logging.error(
            'encountered an unexpected error while analyzing {}'.format(
                video_file_name))
        logging.error(e)

        logging.debug(
            'will exit with code: exception and value: analyze_video')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'analyze_video'
        })
        return_code_queue.close()

        return

    logging.debug('converting timestamp images to strings')

    if do_extract_timestamps:
        try:
            start = time()

            timestamp_object = Timestamp(timestamp_height, timestamp_max_width)
            timestamp_strings, qa_flags = \
              timestamp_object.stringify_timestamps(timestamp_array)

            end = time() - start

            processing_duration = IO.get_processing_duration(
                end, 'timestamp strings converted in')

            logging.info(processing_duration)
        except Exception as e:
            logging.error('encountered an unexpected error while converting '
                          'timestamp image crops to strings')
            logging.error(e)

            logging.debug(
                'will exit with code: exception and value: stringify_timestamps'
            )
            log_queue.put(None)
            log_queue.close()

            return_code_queue.put({
                'return_code': 'exception',
                'return_value': 'stringify_timestamps'
            })
            return_code_queue.close()

            return
    else:
        timestamp_strings = None
        qa_flags = None

    logging.debug('attempting to generate reports')

    if do_write_bbox_reports:
        json_data = []
        for frame_num, frame_map in enumerate(frame_map_array, start=0):
            if timestamp_strings is not None:
                timestamp = timestamp_strings[frame_num]
            else:
                timestamp = None
            for i in range(0, frame_map['num_detections']):
                class_name = class_name_map[frame_map['detection_classes'][i]]
                bbox = frame_map['detection_boxes'][i]
                json_data.append({
                    'frame_num':
                    int(frame_num),
                    'video_name':
                    video_file_name,
                    'timestamp':
                    int(timestamp) if timestamp is not None else None,
                    'class_name':
                    class_name,
                    'detection_boxes':
                    bbox.tolist(),
                    'detection_score':
                    float(frame_map['detection_scores'][i])
                })
        bbox_rep = IO.write_json(video_file_name + 'BBOX', output_dir_path,
                                 json_data)
        output_files.append(bbox_rep)

    try:
        start = time()

        frame_numbers = list(range(1, len(frame_map_array) + 1))

        if timestamp_strings is not None:
            timestamp_strings = timestamp_strings.astype(np.int32)

        # Process our raw predictions into a list of bounding boxes and frame data
        detections = []
        for frame_num, frame_map in enumerate(frame_map_array, start=0):
            if timestamp_strings is not None:
                timestamp = timestamp_strings[frame_num]
            else:
                timestamp = None
            for i in range(0, frame_map['num_detections']):
                class_name = class_name_map[frame_map['detection_classes'][i]]
                bbox = frame_map['detection_boxes'][i]
                xtl = bbox[1] * frame_width
                ytl = bbox[0] * frame_height
                xbr = bbox[3] * frame_width
                ybr = bbox[2] * frame_height
                detections.append({'frame_num': frame_num, 'timestamp': timestamp, 'classification': class_name, \
                                  'xtl': xtl, 'ytl': ytl, 'xbr': xbr, 'ybr': ybr})
        if len(detections) > 0:
            logging.info('{} signal state detections were found in {}'.format(
                len(detections), video_file_name))

            if do_write_event_reports:
                evt_rep = IO.write_signalstate_report(video_file_name,
                                                      output_dir_path,
                                                      detections)
                output_files.append(evt_rep)
        else:
            logging.info('No signal state events were found in {}'.format(
                video_file_name))

        end = time() - start

        processing_duration = IO.get_processing_duration(
            end, 'generated event reports in')
        logging.info(processing_duration)
    except Exception as e:
        logging.error(
            'encountered an unexpected error while generating event report.')
        logging.error(e)

        logging.debug(
            'will exit with code: exception and value: write_event_report')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'write_event_report'
        })
        return_code_queue.close()

        return

    logging.debug('will exit with code: success and value: {}'.format(
        num_analyzed_frames))
    log_queue.put(None)
    log_queue.close()

    return_code_queue.put({
        'return_code': 'success',
        'return_value': num_analyzed_frames,
        'analysis_duration': analysis_duration,
        'output_locations': str(output_files)
    })
    return_code_queue.close()
Example #14
File: main_yang.py Project: iyush/manta
res_x = 48
res_y = 64
res_z = 48
ngrids = 150

sims = [str(x) for x in args.training_director_folder.iterdir() if x.is_dir()]
nsims = len(sims)

nn_input = np.zeros((nsims, 40 * res_x * res_y * res_z, 19))
nn_output = np.zeros((nsims, 40 * res_x * res_y * res_z))

for i, sim_name in enumerate(sims):
    pprint("-----------------------------------------------------------")
    pprint("Loading data from simulation: {}".format(sim_name))

    f_in = IO(sim_name, ext="npz")

    pressure_fields, vel_fields, occupancy_grids = f_in.readAll()

    vel_fields = vel_fields.reshape((ngrids, res_x, res_y, res_z))
    pressure_fields = pressure_fields.reshape(
        (ngrids, res_x, res_y, res_z))
    occupancy_grids = occupancy_grids.reshape(
        (ngrids, res_x, res_y, res_z))

    nn_input[i], nn_output[i] = loadData(
        vel_fields=vel_fields,
        pressure_fields=pressure_fields,
        occupancy_grids=occupancy_grids
    )
Example #15
File: snva.py Project: khanhgithead/SNVA
def main():
  logging.info('entering snva {} main process'.format(snva_version_string))

  total_num_video_to_process = None

  def interrupt_handler(signal_number, _):
    logging.warning('Main process received interrupt signal '
                    '{}.'.format(signal_number))
    main_interrupt_queue.put_nowait('_')

    if total_num_video_to_process is None \
        or total_num_video_to_process == len(video_file_paths):

      # Signal the logging thread to finish up
      logging.debug('signaling logger thread to end service.')

      log_queue.put_nowait(None)

      logger_thread.join()

      logging.shutdown()

  signal.signal(signal.SIGINT, interrupt_handler)

  try:
    ffmpeg_path = os.environ['FFMPEG_HOME']
  except KeyError:
    logging.warning('Environment variable FFMPEG_HOME not set. Attempting '
                    'to use default ffmpeg binary location.')
    if platform.system() == 'Windows':
      ffmpeg_path = 'ffmpeg.exe'
    else:
      ffmpeg_path = '/usr/local/bin/ffmpeg'

      if not path.exists(ffmpeg_path):
        ffmpeg_path = '/usr/bin/ffmpeg'

  logging.debug('FFMPEG path set to: {}'.format(ffmpeg_path))

  try:
    ffprobe_path = os.environ['FFPROBE_HOME']
  except KeyError:
    logging.warning('Environment variable FFPROBE_HOME not set. '
                    'Attempting to use default ffprobe binary location.')
    if platform.system() == 'Windows':
      ffprobe_path = 'ffprobe.exe'
    else:
      ffprobe_path = '/usr/local/bin/ffprobe'

      if not path.exists(ffprobe_path):
        ffprobe_path = '/usr/bin/ffprobe'

  logging.debug('FFPROBE path set to: {}'.format(ffprobe_path))

  # TODO validate all video file paths in the provided text file if args.inputpath is a text file
  if path.isdir(args.inputpath):
    video_file_names = set(IO.read_video_file_names(args.inputpath))
    video_file_paths = [path.join(args.inputpath, video_file_name)
                        for video_file_name in video_file_names]
  elif path.isfile(args.inputpath):
    if args.inputpath[-3:] == 'txt':
      if args.inputlistrootdirpath is None:
        raise ValueError('--inputlistrootdirpath must be specified when using a'
                         ' text file as the input.')
      with open(args.inputpath, newline='') as input_file:
        video_file_paths = []

        for line in input_file.readlines():
          line = line.rstrip()
          # str.lstrip strips characters, not a prefix; remove the root dir prefix explicitly
          if line.startswith(args.inputlistrootdirpath):
            video_file_path = line[len(args.inputlistrootdirpath):]
          else:
            video_file_path = line
          video_file_path = path.join('/media/root', video_file_path)

          if path.isfile(video_file_path):
            video_file_paths.append(video_file_path)
          else:
            logging.warning('The video file at host path {} could not be found '
                            'at mapped path {} and will not be processed'.
              format(line, video_file_path))
    else:
      video_file_paths = [args.inputpath]
  else:
    raise ValueError('The video file/folder specified at the path {} could '
                     'not be found.'.format(args.inputpath))

  models_root_dir_path = path.join(snva_home, args.modelsdirpath)

  models_dir_path = path.join(models_root_dir_path, args.modelname)

  logging.debug('models_dir_path set to {}'.format(models_dir_path))

  model_file_path = path.join(models_dir_path, args.protobuffilename)

  if not path.isfile(model_file_path):
    raise ValueError('The model specified at the path {} could not be '
                     'found.'.format(model_file_path))

  logging.debug('model_file_path set to {}'.format(model_file_path))

  model_input_size_file_path = path.join(models_dir_path, 'input_size.txt')

  if not path.isfile(model_input_size_file_path):
    raise ValueError('The model input size file specified at the path {} '
                     'could not be found.'.format(model_input_size_file_path))

  logging.debug('model_input_size_file_path set to {}'.format(
    model_input_size_file_path))

  with open(model_input_size_file_path) as file:
    model_input_size_string = file.readline().rstrip()

    valid_size_set = ['224', '299']

    if model_input_size_string not in valid_size_set:
      raise ValueError('The model input size is not in the set {}.'.format(
        valid_size_set))

    model_input_size = int(model_input_size_string)

  # if logpath is the default value, expand it using the SNVA_HOME prefix,
  # otherwise, use the value explicitly passed by the user
  if args.outputpath == 'reports':
    output_dir_path = path.join(snva_home, args.outputpath)
  else:
    output_dir_path = args.outputpath

  if not path.isdir(output_dir_path):
    os.makedirs(output_dir_path)

  if args.excludepreviouslyprocessed:
    inference_report_dir_path = path.join(output_dir_path, 'inference_reports')

    if args.writeinferencereports and path.isdir(inference_report_dir_path):
      inference_report_file_names = os.listdir(inference_report_dir_path)
      inference_report_file_names = [path.splitext(name)[0]
                                     for name in inference_report_file_names]
      print('previously generated inference reports: {}'.format(
        inference_report_file_names))
    else:
      inference_report_file_names = None
    event_report_dir_path = path.join(output_dir_path, 'event_reports')

    if args.writeeventreports and path.isdir(event_report_dir_path):
      event_report_file_names = os.listdir(event_report_dir_path)
      event_report_file_names = [path.splitext(name)[0]
                                 for name in event_report_file_names]
      print('previously generated event reports: {}'.format(
        event_report_file_names))
    else:
      event_report_file_names = None

    file_paths_to_exclude = set()

    for video_file_path in video_file_paths:
      video_file_name = path.splitext(path.split(video_file_path)[1])[0]
      if (event_report_file_names and video_file_name 
      in event_report_file_names) \
          or (inference_report_file_names and video_file_name 
          in inference_report_file_names):
        file_paths_to_exclude.add(video_file_path)

    video_file_paths = [p for p in video_file_paths
                        if p not in file_paths_to_exclude]

  if args.ionodenamesfilepath is None \
      or not path.isfile(args.ionodenamesfilepath):
    io_node_names_path = path.join(models_dir_path, 'io_node_names.txt')
  else:
    io_node_names_path = args.ionodenamesfilepath
  logging.debug('io tensors path set to: {}'.format(io_node_names_path))

  node_name_map = IO.read_node_names(io_node_names_path)

  if args.classnamesfilepath is None \
      or not path.isfile(args.classnamesfilepath):
    class_names_path = path.join(models_root_dir_path, 'class_names.txt')
  else:
    class_names_path = args.classnamesfilepath
  logging.debug('labels path set to: {}'.format(class_names_path))

  if args.cpuonly:
    device_id_list = ['0']
    device_type = 'cpu'
  else:
    device_id_list = IO.get_device_ids()
    device_type = 'gpu'

  physical_device_count = len(device_id_list)

  logging.info('Found {} physical {} device(s).'.format(
    physical_device_count, device_type))

  valid_num_processes_list = get_valid_num_processes_per_device(device_type)

  if args.numprocessesperdevice not in valid_num_processes_list:
      raise ValueError(
        'The number of processes to assign to each {} device is expected '
        'to be in the set {}.'.format(device_type, valid_num_processes_list))

  for i in range(physical_device_count,
                 physical_device_count * args.numprocessesperdevice):
    device_id_list.append(str(i))

  logical_device_count = len(device_id_list)

  logging.info('Generated an additional {} logical {} device(s).'.format(
    logical_device_count - physical_device_count, device_type))

  # child processes will dequeue and enqueue device names
  device_id_queue = Queue(logical_device_count)

  for device_id in device_id_list:
    device_id_queue.put(device_id)

  class_name_map = IO.read_class_names(class_names_path)

  logging.debug('loading model at path: {}'.format(model_file_path))

  return_code_queue_map = {}
  child_logger_thread_map = {}
  child_process_map = {}

  total_num_video_to_process = len(video_file_paths)

  total_num_processed_videos = 0
  total_num_processed_frames = 0
  total_analysis_duration = 0

  logging.info('Processing {} videos using {}'.format(
    total_num_video_to_process, args.modelname))

  def start_video_processor(video_file_path):
    # Before popping the next video off of the list and creating a process to
    # scan it, check to see if fewer than logical_device_count + 1 processes are
    # active. If not, Wait for a child process to release its semaphore
    # acquisition. If so, acquire the semaphore, pop the next video name,
    # create the next child process, and pass the semaphore to it
    video_dir_path, video_file_name = path.split(video_file_path)

    return_code_queue = Queue()

    return_code_queue_map[video_file_name] = return_code_queue

    logging.debug('creating new child process.')

    child_log_queue = Queue()

    child_logger_thread = Thread(target=child_logger_fn,
                                 args=(log_queue, child_log_queue))

    child_logger_thread.start()

    child_logger_thread_map[video_file_name] = child_logger_thread

    gpu_memory_fraction = args.gpumemoryfraction / args.numprocessesperdevice

    child_process = Process(
      target=process_video, name=path.splitext(video_file_name)[0],
      args=(video_file_path, output_dir_path, class_name_map, model_input_size,
            device_id_queue, return_code_queue, child_log_queue, log_level,
            device_type, logical_device_count, physical_device_count,
            ffmpeg_path, ffprobe_path, model_file_path, node_name_map,
            gpu_memory_fraction, args.crop, args.cropwidth, args.cropheight,
            args.cropx, args.cropy, args.extracttimestamps,
            args.timestampmaxwidth, args.timestampheight, args.timestampx,
            args.timestampy, args.deinterlace, args.numchannels, args.batchsize,
            args.smoothprobs, args.smoothingfactor, args.binarizeprobs))

    logging.debug('starting child process.')

    child_process.start()

    child_process_map[video_file_name] = child_process

  def close_completed_video_processors(
      total_num_processed_videos, total_num_processed_frames,
      total_analysis_duration):
    for video_file_name in list(return_code_queue_map.keys()):
      return_code_queue = return_code_queue_map[video_file_name]

      try:
        return_code_map = return_code_queue.get_nowait()

        return_code = return_code_map['return_code']
        return_value = return_code_map['return_value']

        child_process = child_process_map[video_file_name]

        logging.debug(
          'child process {} returned with exit code {} and exit value '
          '{}'.format(child_process.pid, return_code, return_value))

        if return_code == 'success':
          total_num_processed_videos += 1
          total_num_processed_frames += return_value
          total_analysis_duration += return_code_map['analysis_duration']

        child_logger_thread = child_logger_thread_map[video_file_name]
        
        logging.debug('joining logger thread for child process {}'.format(
          child_process.pid))

        child_logger_thread.join(timeout=15)

        if child_logger_thread.is_alive():
          logging.warning(
            'logger thread for child process {} remained alive following join '
            'timeout'.format(child_process.pid))
        
        logging.debug('joining child process {}'.format(child_process.pid))
        
        child_process.join(timeout=15)

        # if the child process has not yet terminated, kill the child process at
        # the risk of losing any log message not yet buffered by the main logger
        try:
          os.kill(child_process.pid, signal.SIGKILL)
          logging.warning(
            'child process {} remained alive following join timeout and had to '
            'be killed'.format(child_process.pid))
        except OSError:
          pass
        
        return_code_queue.close()
        
        return_code_queue_map.pop(video_file_name)
        child_logger_thread_map.pop(video_file_name)
        child_process_map.pop(video_file_name)
      except Empty:
        pass

    return total_num_processed_videos, total_num_processed_frames, \
           total_analysis_duration

  start = time()

  while len(video_file_paths) > 0:
    # block if logical_device_count + 1 child processes are active
    while len(return_code_queue_map) > logical_device_count:
      total_num_processed_videos, total_num_processed_frames, \
      total_analysis_duration = close_completed_video_processors(
        total_num_processed_videos, total_num_processed_frames,
        total_analysis_duration)

    try:
      _ = main_interrupt_queue.get_nowait()
      logging.debug(
        'breaking out of child process generation following interrupt signal')
      break
    except Empty:
      pass

    video_file_path = video_file_paths.pop()

    try:
      start_video_processor(video_file_path)
    except Exception as e:
      logging.error('an unknown error has occurred while processing '
                    '{}'.format(video_file_path))
      logging.error(e)

  while len(return_code_queue_map) > 0:
    logging.debug('waiting for the final {} child processes to '
                  'terminate'.format(len(return_code_queue_map)))

    total_num_processed_videos, total_num_processed_frames, \
    total_analysis_duration = close_completed_video_processors(
      total_num_processed_videos, total_num_processed_frames,
      total_analysis_duration)

    # by now, the last device_id_queue_len videos are being processed,
    # so we can afford to poll for their completion infrequently
    if len(return_code_queue_map) > 0:
      sleep_duration = 10
      logging.debug('sleeping for {} seconds'.format(sleep_duration))
      sleep(sleep_duration)

  end = time() - start

  processing_duration = IO.get_processing_duration(
    end, 'snva {} processed a total of {} videos and {} frames in:'.format(
      snva_version_string, total_num_processed_videos,
      total_num_processed_frames))
  logging.info(processing_duration)

  logging.info('Video analysis alone spanned a cumulative {:.02f} '
               'seconds'.format(total_analysis_duration))

  logging.info('exiting snva {} main process'.format(snva_version_string))
Example #16
def test_net(cfg,
             epoch_idx=-1,
             test_data_loader=None,
             test_writer=None,
             grnet=None):
    # Enable the inbuilt cudnn auto-tuner to find the best algorithm to use
    torch.backends.cudnn.benchmark = True

    if test_data_loader is None:
        # Set up data loader
        dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[
            cfg.DATASET.TEST_DATASET](cfg)
        test_data_loader = torch.utils.data.DataLoader(
            dataset=dataset_loader.get_dataset(
                utils.data_loaders.DatasetSubset.TEST),
            batch_size=1,
            num_workers=cfg.CONST.NUM_WORKERS,
            collate_fn=utils.data_loaders.collate_fn,
            pin_memory=True,
            shuffle=False)

    # Setup networks and initialize networks
    if grnet is None:
        grnet = GRNet(cfg)

        if torch.cuda.is_available():
            grnet = torch.nn.DataParallel(grnet).cuda()

        logging.info('Recovering from %s ...' % (cfg.CONST.WEIGHTS))
        checkpoint = torch.load(cfg.CONST.WEIGHTS)
        grnet.load_state_dict(checkpoint['grnet'])

    # Switch models to evaluation mode
    grnet.eval()

    # Set up loss functions
    chamfer_dist = ChamferDistance()
    gridding_loss = GriddingLoss(
        scales=cfg.NETWORK.GRIDDING_LOSS_SCALES,
        alphas=cfg.NETWORK.GRIDDING_LOSS_ALPHAS)  # lgtm [py/unused-import]

    # Testing loop
    n_samples = len(test_data_loader)
    test_losses = AverageMeter(['SparseLoss', 'DenseLoss'])
    test_metrics = AverageMeter(Metrics.names())
    category_metrics = dict()

    # Testing loop
    for model_idx, (taxonomy_id, model_id,
                    data) in enumerate(test_data_loader):
        taxonomy_id = taxonomy_id[0] if isinstance(
            taxonomy_id[0], str) else taxonomy_id[0].item()
        model_id = model_id[0]

        with torch.no_grad():
            for k, v in data.items():
                data[k] = utils.helpers.var_or_cuda(v)

            sparse_ptcloud, dense_ptcloud = grnet(data)
            sparse_loss = chamfer_dist(sparse_ptcloud, data['gtcloud'])
            dense_loss = chamfer_dist(dense_ptcloud, data['gtcloud'])
            test_losses.update(
                [sparse_loss.item() * 1000,
                 dense_loss.item() * 1000])
            _metrics = Metrics.get(dense_ptcloud, data['gtcloud'])
            test_metrics.update(_metrics)

            # save predicted point cloud
            if cfg.TEST.SAVE_PRED:
                if cfg.DATASET.TEST_DATASET == 'FrankaScan':
                    dirname, obj_idx = model_id.split('-')
                    out_ptcloud = dense_ptcloud[0].cpu()
                    IO.put(
                        cfg.DATASETS.FRANKASCAN.PREDICTION_PATH %
                        (dirname, obj_idx), out_ptcloud)

            if taxonomy_id not in category_metrics:
                category_metrics[taxonomy_id] = AverageMeter(Metrics.names())
            category_metrics[taxonomy_id].update(_metrics)

            if test_writer is not None and model_idx < 3:
                sparse_ptcloud = sparse_ptcloud.squeeze().cpu().numpy()
                sparse_ptcloud_img = utils.helpers.get_ptcloud_img(
                    sparse_ptcloud)
                test_writer.add_image(
                    'Model%02d/SparseReconstruction' % model_idx,
                    sparse_ptcloud_img, epoch_idx)
                dense_ptcloud = dense_ptcloud.squeeze().cpu().numpy()
                dense_ptcloud_img = utils.helpers.get_ptcloud_img(
                    dense_ptcloud)
                test_writer.add_image(
                    'Model%02d/DenseReconstruction' % model_idx,
                    dense_ptcloud_img, epoch_idx)
                gt_ptcloud = data['gtcloud'].squeeze().cpu().numpy()
                gt_ptcloud_img = utils.helpers.get_ptcloud_img(gt_ptcloud)
                test_writer.add_image('Model%02d/GroundTruth' % model_idx,
                                      gt_ptcloud_img, epoch_idx)

            logging.info(
                'Test[%d/%d] Taxonomy = %s Sample = %s Losses = %s Metrics = %s'
                %
                (model_idx + 1, n_samples, taxonomy_id, model_id,
                 ['%.4f' % l
                  for l in test_losses.val()], ['%.4f' % m for m in _metrics]))

    # Print testing results
    print(
        '============================ TEST RESULTS ============================'
    )
    print('Taxonomy', end='\t')
    print('#Sample', end='\t')
    for metric in test_metrics.items:
        print(metric, end='\t')
    print()

    for taxonomy_id in category_metrics:
        print(taxonomy_id, end='\t')
        print(category_metrics[taxonomy_id].count(0), end='\t')
        for value in category_metrics[taxonomy_id].avg():
            print('%.4f' % value, end='\t')
        print()

    print('Overall', end='\t\t\t')
    for value in test_metrics.avg():
        print('%.4f' % value, end='\t')
    print('\n')

    # Add testing results to TensorBoard
    if test_writer is not None:
        test_writer.add_scalar('Loss/Epoch/Sparse', test_losses.avg(0),
                               epoch_idx)
        test_writer.add_scalar('Loss/Epoch/Dense', test_losses.avg(1),
                               epoch_idx)
        for i, metric in enumerate(test_metrics.items):
            test_writer.add_scalar('Metric/%s' % metric, test_metrics.avg(i),
                                   epoch_idx)

    return Metrics(cfg.TEST.METRIC_NAME, test_metrics.avg())
Example #17
File: snva.py Project: fabat01/SNVA
async def main():
    logging.info('entering snva {} main process'.format(snva_version_string))

    # total_num_video_to_process = None

    def interrupt_handler(signal_number, _):
        logging.warning('Main process received interrupt signal '
                        '{}.'.format(signal_number))
        main_interrupt_queue.put_nowait('_')

        # if total_num_video_to_process is None \
        #     or total_num_video_to_process == len(video_file_paths):

        # Signal the logging thread to finish up
        logging.debug('signaling logger thread to end service.')

        log_queue.put_nowait(None)

        logger_thread.join()

        logging.shutdown()

    signal.signal(signal.SIGINT, interrupt_handler)

    try:
        ffmpeg_path = os.environ['FFMPEG_HOME']
    except KeyError:
        logging.warning('Environment variable FFMPEG_HOME not set. Attempting '
                        'to use default ffmpeg binary location.')
        if platform.system() == 'Windows':
            ffmpeg_path = 'ffmpeg.exe'
        else:
            ffmpeg_path = '/usr/local/bin/ffmpeg'

            if not path.exists(ffmpeg_path):
                ffmpeg_path = '/usr/bin/ffmpeg'

    logging.debug('FFMPEG path set to: {}'.format(ffmpeg_path))

    try:
        ffprobe_path = os.environ['FFPROBE_HOME']
    except KeyError:
        logging.warning('Environment variable FFPROBE_HOME not set. '
                        'Attempting to use default ffprobe binary location.')
        if platform.system() == 'Windows':
            ffprobe_path = 'ffprobe.exe'
        else:
            ffprobe_path = '/usr/local/bin/ffprobe'

            if not path.exists(ffprobe_path):
                ffprobe_path = '/usr/bin/ffprobe'

    logging.debug('FFPROBE path set to: {}'.format(ffprobe_path))

    # # TODO validate all video file paths in the provided text file if args.inputpath is a text file
    # if path.isdir(args.inputpath):
    #   video_file_names = set(IO.read_video_file_names(args.inputpath))
    #   video_file_paths = [path.join(args.inputpath, video_file_name)
    #                       for video_file_name in video_file_names]
    # elif path.isfile(args.inputpath):
    #   if args.inputpath[-3:] == 'txt':
    #     if args.inputlistrootdirpath is None:
    #       raise ValueError('--inputlistrootdirpath must be specified when using a'
    #                        ' text file as the input.')
    #     with open(args.inputpath, newline='') as input_file:
    #       video_file_paths = []
    #
    #       for line in input_file.readlines():
    #         line = line.rstrip()
    #         video_file_path = line.lstrip(args.inputlistrootdirpath)
    #         video_file_path = path.join('/media/root', video_file_path)
    #
    #         if path.isfile(video_file_path):
    #           video_file_paths.append(video_file_path)
    #         else:
    #           logging.warning('The video file at host path {} could not be found '
    #                           'at mapped path {} and will not be processed'.
    #             format(line, video_file_path))
    #   else:
    #     video_file_paths = [args.inputpath]
    # else:
    #   raise ValueError('The video file/folder specified at the path {} could '
    #                    'not be found.'.format(args.inputpath))

    models_root_dir_path = path.join(snva_home, args.modelsdirpath)

    models_dir_path = path.join(models_root_dir_path, args.modelname)

    logging.debug('models_dir_path set to {}'.format(models_dir_path))

    # model_file_path = path.join(models_dir_path, args.protobuffilename)
    #
    # if not path.isfile(model_file_path):
    #   raise ValueError('The model specified at the path {} could not be '
    #                    'found.'.format(model_file_path))
    #
    # logging.debug('model_file_path set to {}'.format(model_file_path))

    model_input_size_file_path = path.join(models_dir_path, 'input_size.txt')

    if not path.isfile(model_input_size_file_path):
        raise ValueError(
            'The model input size file specified at the path {} '
            'could not be found.'.format(model_input_size_file_path))

    logging.debug('model_input_size_file_path set to {}'.format(
        model_input_size_file_path))

    with open(model_input_size_file_path) as file:
        model_input_size_string = file.readline().rstrip()

        valid_size_set = ['224', '299']

        if model_input_size_string not in valid_size_set:
            raise ValueError(
                'The model input size is not in the set {}.'.format(
                    valid_size_set))

        model_input_size = int(model_input_size_string)

    # if outputpath is the default value, expand it using the SNVA_HOME prefix,
    # otherwise, use the value explicitly passed by the user
    if args.outputpath == 'reports':
        output_dir_path = path.join(snva_home, args.outputpath)
    else:
        output_dir_path = args.outputpath
    logging.info("Output path set to: {}".format(output_dir_path))
    if not path.isdir(output_dir_path):
        os.makedirs(output_dir_path)

    if args.classnamesfilepath is None \
        or not path.isfile(args.classnamesfilepath):
        class_names_path = path.join(models_root_dir_path, 'class_names.txt')
    else:
        class_names_path = args.classnamesfilepath
    logging.debug('labels path set to: {}'.format(class_names_path))

    num_processes = args.numprocesses

    class_name_map = IO.read_class_names(class_names_path)

    return_code_queue_map = {}
    child_logger_thread_map = {}
    child_process_map = {}

    total_num_processed_videos = 0
    total_num_processed_frames = 0
    total_analysis_duration = 0

    def start_video_processor(video_file_path):
        # Before popping the next video off of the list and creating a process to
        # scan it, check to see if fewer than logical_device_count + 1 processes are
        # active. If not, Wait for a child process to release its semaphore
        # acquisition. If so, acquire the semaphore, pop the next video name,
        # create the next child process, and pass the semaphore to it
        return_code_queue = Queue()

        return_code_queue_map[video_file_path] = return_code_queue

        logging.debug('creating new child process.')

        child_log_queue = Queue()

        child_logger_thread = Thread(target=child_logger_fn,
                                     args=(log_queue, child_log_queue))

        child_logger_thread.start()

        child_logger_thread_map[video_file_path] = child_logger_thread

        if 'signalstate' == args.processormode:
            child_process = Process(
                target=process_video_signalstate,
                name=path.splitext(path.split(video_file_path)[1])[0],
                args=(video_file_path, output_dir_path, class_name_map,
                      args.modelname, args.modelsignaturename,
                      args.modelserverhost, model_input_size,
                      return_code_queue, child_log_queue, log_level,
                      ffmpeg_path, ffprobe_path, args.crop, args.cropwidth,
                      args.cropheight, args.cropx, args.cropy,
                      args.extracttimestamps, args.timestampmaxwidth,
                      args.timestampheight, args.timestampx, args.timestampy,
                      args.deinterlace, args.numchannels, args.batchsize,
                      args.smoothprobs, args.smoothingfactor,
                      args.binarizeprobs, args.writebbox,
                      args.writeeventreports, args.maxanalyzerthreads,
                      args.processormode))
        else:
            child_process = Process(
                target=process_video,
                name=path.splitext(path.split(video_file_path)[1])[0],
                args=(video_file_path, output_dir_path, class_name_map,
                      args.modelname, args.modelsignaturename,
                      args.modelserverhost, model_input_size,
                      return_code_queue, child_log_queue, log_level,
                      ffmpeg_path, ffprobe_path, args.crop, args.cropwidth,
                      args.cropheight, args.cropx, args.cropy,
                      args.extracttimestamps, args.timestampmaxwidth,
                      args.timestampheight, args.timestampx, args.timestampy,
                      args.deinterlace, args.numchannels, args.batchsize,
                      args.smoothprobs, args.smoothingfactor,
                      args.binarizeprobs, args.writeinferencereports,
                      args.writeeventreports, args.maxanalyzerthreads,
                      args.processormode))
        logging.debug('starting child process.')

        child_process.start()

        child_process_map[video_file_path] = child_process

    async def close_completed_video_processors(total_num_processed_videos,
                                               total_num_processed_frames,
                                               total_analysis_duration,
                                               websocket_conn):
        for video_file_path in list(return_code_queue_map.keys()):
            return_code_queue = return_code_queue_map[video_file_path]

            try:
                return_code_map = return_code_queue.get_nowait()

                return_code = return_code_map['return_code']
                return_value = return_code_map['return_value']

                child_process = child_process_map[video_file_path]

                logging.debug(
                    'child process {} returned with exit code {} and exit value '
                    '{}'.format(child_process.pid, return_code, return_value))

                if return_code == 'success':
                    total_num_processed_videos += 1
                    total_num_processed_frames += return_value
                    total_analysis_duration += return_code_map[
                        'analysis_duration']

                    logging.info('notifying control node of completion')

                    complete_request = json.dumps({
                        'action':
                        'COMPLETE',
                        'video':
                        os.path.basename(video_file_path),
                        'output':
                        return_code_map['output_locations']
                    })
                    await websocket_conn.send(complete_request)

                child_logger_thread = child_logger_thread_map[video_file_path]

                logging.debug(
                    'joining logger thread for child process {}'.format(
                        child_process.pid))

                child_logger_thread.join(timeout=15)

                if child_logger_thread.is_alive():
                    logging.warning(
                        'logger thread for child process {} remained alive following join '
                        'timeout'.format(child_process.pid))

                logging.debug('joining child process {}'.format(
                    child_process.pid))

                child_process.join(timeout=15)

                # if the child process has not yet terminated, kill the child process at
                # the risk of losing any log message not yet buffered by the main logger
                try:
                    os.kill(child_process.pid, signal.SIGKILL)
                    logging.warning(
                        'child process {} remained alive following join timeout and had to '
                        'be killed'.format(child_process.pid))
                except:
                    pass

                return_code_queue.close()

                return_code_queue_map.pop(video_file_path)
                child_logger_thread_map.pop(video_file_path)
                child_process_map.pop(video_file_path)
            except Empty:
                pass

        return total_num_processed_videos, total_num_processed_frames, \
               total_analysis_duration

    start = time()

    sleep_duration = 1
    breakLoop = False
    connectionId = None
    isIdle = False
    while True:
        try:
            if breakLoop:
                break
            wsUrl = 'ws://' + args.controlnodehost + '/registerProcess'
            if connectionId is not None:
                wsUrl = wsUrl + '?id=' + connectionId
            logging.debug("Connecting with URL {}".format(wsUrl))
            async with ws.connect(wsUrl) as conn:
                response = await conn.recv()
                response = json.loads(response)
                logging.info(response)

                if response['action'] != 'CONNECTION_SUCCESS':
                    raise ConnectionError(
                        'control node connection failed with response: {}'.
                        format(response))
                if connectionId is None:
                    connectionId = response['id']
                logging.debug("Assigned id {}".format(connectionId))
                while True:
                    # block if num_processes child processes are active
                    while len(return_code_queue_map) >= num_processes:
                        total_num_processed_videos, total_num_processed_frames, \
                        total_analysis_duration = await close_completed_video_processors(
                          total_num_processed_videos, total_num_processed_frames,
                          total_analysis_duration, conn)
                        sleep(sleep_duration)

                    try:  # todo poll for termination signal from control node
                        _ = main_interrupt_queue.get_nowait()
                        logging.debug(
                            'breaking out of child process generation following interrupt signal'
                        )
                        break
                    except:
                        pass

                    if not isIdle:
                        logging.info('requesting video')
                        request = json.dumps({'action': 'REQUEST_VIDEO'})
                        await conn.send(request)
                        logging.info('reading response')
                        response = await conn.recv()
                    else:
                        # If idle, we will try to close completed processors until all are done
                        while len(return_code_queue_map) > 0:
                            # Before checking for completed processes, check for a new message
                            logging.info('Checking for new message')
                            try:
                                # If we get a response quickly, break our waiting loop and process the command
                                response = await asyncio.wait_for(
                                    conn.recv(), 1)
                                break
                            except asyncio.TimeoutError:
                                # Otherwise, go back to finishing our current tasks
                                logging.debug(
                                    'No new message from control node, continuing...'
                                )
                                pass
                            total_num_processed_videos, total_num_processed_frames, \
                            total_analysis_duration = await close_completed_video_processors(
                              total_num_processed_videos, total_num_processed_frames,
                              total_analysis_duration, conn)
                            # by now, the last num_processes videos are being processed,
                            # so we can afford to poll for their completion infrequently
                            if len(return_code_queue_map) > 0:
                                sleep(sleep_duration)
                        # Once all are complete, if still idle we have no work left to do - we just wait for a new message
                        response = await conn.recv()

                    response = json.loads(response)

                    if response['action'] == 'STATUS_REQUEST':
                        logging.info('control node requested status request')
                        pass
                    elif response['action'] == 'CEASE_REQUESTS':
                        logging.info(
                            'control node has no more videos to process')
                        isIdle = True
                        pass
                    elif response['action'] == 'RESUME_REQUESTS':
                        logging.info(
                            'control node has instructed to resume requests')
                        isIdle = False
                        pass
                    elif response['action'] == 'SHUTDOWN':
                        logging.info('control node requested shutdown')
                        breakLoop = True
                        break
                    elif response['action'] == 'PROCESS':
                        # TODO Prepend input path
                        video_file_path = os.path.join(args.inputpath,
                                                       response['path'])
                        request_received = json.dumps({
                            'action': 'REQUEST_RECEIVED',
                            'video': response['path']
                        })
                        await conn.send(request_received)
                        try:
                            start_video_processor(video_file_path)
                        except Exception as e:
                            logging.error(
                                'an unknown error has occurred while processing {}'
                                .format(video_file_path))
                            logging.error(e)
                    else:
                        raise ConnectionError(
                            'control node replied with unexpected response: {}'
                            .format(response))
                logging.debug('{} child processes remain enqueued'.format(
                    len(return_code_queue_map)))
                while len(return_code_queue_map) > 0:
                    #logging.debug('waiting for the final {} child processes to '
                    #              'terminate'.format(len(return_code_queue_map)))

                    total_num_processed_videos, total_num_processed_frames, \
                    total_analysis_duration = await close_completed_video_processors(
                      total_num_processed_videos, total_num_processed_frames,
                      total_analysis_duration, conn)

                    # by now, the last num_processes videos are being processed,
                    # so we can afford to poll for their completion infrequently
                    if len(return_code_queue_map) > 0:
                        #logging.debug('sleeping for {} seconds'.format(sleep_duration))
                        sleep(sleep_duration)

                end = time() - start

                processing_duration = IO.get_processing_duration(
                    end,
                    'snva {} processed a total of {} videos and {} frames in:'.
                    format(snva_version_string, total_num_processed_videos,
                           total_num_processed_frames))
                logging.info(processing_duration)

                logging.info(
                    'Video analysis alone spanned a cumulative {:.02f} '
                    'seconds'.format(total_analysis_duration))

                logging.info(
                    'exiting snva {} main process'.format(snva_version_string))
                breakLoop = True
        except socket.gaierror:
            # log something
            logging.info('gaierror')
            continue
        except ConnectionRefusedError:
            # log something else
            logging.info('connection refused')
            break
        except ws.exceptions.ConnectionClosed:
            logging.info('Connection lost. Attempting to reconnect...')
            continue
        except Exception as e:
            logging.error("Unknown Exception")
            logging.error(e)
            raise e
        if breakLoop:
            break
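
The loop above is driven entirely by JSON messages exchanged with a control node over a websocket at /registerProcess: the node answers the initial connection with CONNECTION_SUCCESS and an assigned id, hands out videos via PROCESS in response to REQUEST_VIDEO, and may send CEASE_REQUESTS, RESUME_REQUESTS, STATUS_REQUEST, or SHUTDOWN at any time, while the analyzer replies with REQUEST_RECEIVED and COMPLETE. A minimal control-node stub that speaks just enough of this protocol to exercise main() could look like the sketch below; it is not part of SNVA, the port and video names are hypothetical, and the handler signature depends on the installed websockets version.

import asyncio
import json

import websockets  # assumed dependency; not shown elsewhere in this excerpt

PENDING_VIDEOS = ['sample_0001.mp4', 'sample_0002.mp4']  # hypothetical inputs

async def register_process(websocket, path):  # two-arg form used by older websockets releases
    # main() expects an immediate CONNECTION_SUCCESS carrying an assigned id
    await websocket.send(json.dumps({'action': 'CONNECTION_SUCCESS', 'id': '1'}))

    async for message in websocket:
        request = json.loads(message)

        if request['action'] == 'REQUEST_VIDEO':
            if PENDING_VIDEOS:
                await websocket.send(json.dumps(
                    {'action': 'PROCESS', 'path': PENDING_VIDEOS.pop(0)}))
            else:
                # no work left; the analyzer switches to its idle loop
                await websocket.send(json.dumps({'action': 'CEASE_REQUESTS'}))
        elif request['action'] in ('REQUEST_RECEIVED', 'COMPLETE'):
            pass  # acknowledgements from the analyzer; nothing to do here

async def serve_stub():
    async with websockets.serve(register_process, 'localhost', 8080):
        await asyncio.Future()  # serve until cancelled

if __name__ == '__main__':
    asyncio.run(serve_stub())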
Example #18
def process_video(video_file_path, output_dir_path, class_name_map,
                  model_input_size, device_id_queue, return_code_queue,
                  log_queue, log_level, device_type, logical_device_count,
                  physical_device_count, ffmpeg_path, ffprobe_path, model_path,
                  node_name_map, gpu_memory_fraction, do_crop, crop_width,
                  crop_height, crop_x, crop_y, do_extract_timestamps,
                  timestamp_max_width, timestamp_height, timestamp_x,
                  timestamp_y, do_deinterlace, num_channels, batch_size,
                  do_smooth_probs, smoothing_factor, do_binarize_probs):
    configure_logger(log_level, log_queue)

    interrupt_queue = Queue()

    child_interrupt_queue = Queue()

    def interrupt_handler(signal_number, _):
        logging.warning('received interrupt signal {}.'.format(signal_number))

        interrupt_queue.put_nowait('_')

        # TODO: cancel timestamp/report generation when an interrupt is signalled
        logging.debug('instructing inference pipeline to halt.')
        child_interrupt_queue.put_nowait('_')

    signal.signal(signal.SIGINT, interrupt_handler)

    video_file_name = path.basename(video_file_path)
    video_file_name, _ = path.splitext(video_file_name)

    logging.info('preparing to analyze {}'.format(video_file_path))

    try:
        start = time()

        frame_width, frame_height, num_frames = IO.get_video_dimensions(
            video_file_path, ffprobe_path)

        end = time() - start

        processing_duration = IO.get_processing_duration(
            end, 'read video dimensions in')

        logging.info(processing_duration)
    except Exception as e:
        logging.error('encountered an unexpected error while fetching video '
                      'dimensions')
        logging.error(e)

        logging.debug(
            'will exit with code: exception and value: get_video_dimensions')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'get_video_dimensions'
        })
        return_code_queue.close()

        return

    try:
        do_crop = should_crop(frame_width, frame_height, do_crop, crop_width,
                              crop_height, crop_x, crop_y)
    except Exception as e:
        logging.error(e)

        logging.debug('will exit with code: exception and value: should_crop')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'should_crop'
        })
        return_code_queue.close()

        return

    logging.debug('Constructing ffmpeg command')

    ffmpeg_command = [ffmpeg_path, '-i', video_file_path]

    if do_deinterlace:
        ffmpeg_command.append('-deinterlace')

    ffmpeg_command.extend([
        '-vcodec', 'rawvideo', '-pix_fmt', 'rgb24', '-vsync', 'vfr',
        '-hide_banner', '-loglevel', '0', '-f', 'image2pipe', 'pipe:1'
    ])

    try:
        do_extract_timestamps = should_extract_timestamps(
            frame_width, frame_height, do_extract_timestamps,
            timestamp_max_width, timestamp_height, timestamp_x, timestamp_y)
    except Exception as e:
        logging.error(e)

        logging.debug(
            'will exit with code: exception and value: should_extract_timestamps'
        )
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'should_extract_timestamps'
        })
        return_code_queue.close()

        return

    frame_shape = [frame_height, frame_width, num_channels]

    logging.debug('FFmpeg output frame shape == {}'.format(frame_shape))

    def release_device_id(device_id, device_id_queue):
        try:
            logging.debug(
                'attempting to unset CUDA_VISIBLE_DEVICES environment variable.'
            )
            os.environ.pop('CUDA_VISIBLE_DEVICES')
        except KeyError as ke:
            logging.warning(ke)

        logging.debug('released {} device with id {}'.format(
            device_type, device_id))

        device_id_queue.put(device_id)
        device_id_queue.close()

    result_queue = Queue(1)

    analyzer = VideoAnalyzer(
        frame_shape, num_frames, len(class_name_map), batch_size,
        model_input_size, model_path, device_type, logical_device_count,
        os.cpu_count(), node_name_map, gpu_memory_fraction,
        do_extract_timestamps, timestamp_x, timestamp_y, timestamp_height,
        timestamp_max_width, do_crop, crop_x, crop_y, crop_width, crop_height,
        ffmpeg_command, child_interrupt_queue, result_queue, video_file_name)

    device_id = device_id_queue.get()

    logging.debug('acquired {} device with id {}'.format(
        device_type, device_id))

    try:
        _ = child_interrupt_queue.get_nowait()

        release_device_id(device_id, device_id_queue)

        logging.debug(
            'will exit with code: interrupt and value: process_video')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'interrupt',
            'return_value': 'process_video'
        })
        return_code_queue.close()

        return
    except:
        pass

    if device_type == 'gpu':
        mapped_device_id = str(int(device_id) % physical_device_count)
        logging.debug(
            'mapped logical device_id {} to physical device_id {}'.format(
                device_id, mapped_device_id))

        logging.debug('setting CUDA_VISIBLE_DEVICES environment variable to '
                      '{}.'.format(mapped_device_id))
        os.environ['CUDA_VISIBLE_DEVICES'] = mapped_device_id
    else:
        logging.debug(
            'setting CUDA_VISIBLE_DEVICES environment variable to the empty '
            'string.')
        os.environ['CUDA_VISIBLE_DEVICES'] = ''

    try:
        start = time()

        analyzer.start()

        num_analyzed_frames, probability_array, timestamp_array = result_queue.get(
        )

        analyzer.terminate()

        result_queue.close()

        end = time()

        analysis_duration = end - start

        processing_duration = IO.get_processing_duration(
            analysis_duration,
            'processed {} frames in'.format(num_analyzed_frames))
        logging.info(processing_duration)

        analyzer.join(timeout=15)

        try:
            os.kill(analyzer.pid, signal.SIGKILL)
            logging.debug(
                'analyzer process {} remained alive following join timeout '
                'and had to be killed'.format(analyzer.pid))
        except:
            pass

        release_device_id(device_id, device_id_queue)

        if num_analyzed_frames != num_frames:
            if interrupt_queue.empty():
                raise AssertionError('num_analyzed_frames ({}) != num_frames '
                                     '({})'.format(num_analyzed_frames,
                                                   num_frames))
            else:
                raise InterruptedError(
                    'num_analyzed_frames ({}) != num_frames '
                    '({})'.format(num_analyzed_frames, num_frames))
    except InterruptedError as ae:
        logging.error(ae)

        release_device_id(device_id, device_id_queue)

        logging.debug(
            'will exit with code: interrupt and value: analyze_video')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'interrupt',
            'return_value': 'analyze_video'
        })
        return_code_queue.close()

        return
    except AssertionError as ae:
        logging.error(ae)

        release_device_id(device_id, device_id_queue)

        logging.debug(
            'will exit with code: assertion error and value: analyze_video')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'assertion error',
            'return_value': 'analyze_video'
        })
        return_code_queue.close()

        return
    except Exception as e:
        logging.error(
            'encountered an unexpected error while analyzing {}'.format(
                video_file_name))
        logging.error(e)

        release_device_id(device_id, device_id_queue)

        logging.debug(
            'will exit with code: exception and value: analyze_video')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'analyze_video'
        })
        return_code_queue.close()

        return

    logging.debug('converting timestamp images to strings')

    if do_extract_timestamps:
        try:
            start = time()

            timestamp_object = Timestamp(timestamp_height, timestamp_max_width)
            timestamp_strings, qa_flags = \
              timestamp_object.stringify_timestamps(timestamp_array)

            end = time() - start

            processing_duration = IO.get_processing_duration(
                end, 'timestamp strings converted in')

            logging.info(processing_duration)
        except Exception as e:
            logging.error('encountered an unexpected error in process {} while '
                          'converting timestamp image crops to strings'.format(
                              os.getpid()))
            logging.error(e)

            logging.debug(
                'will exit with code: exception and value: stringify_timestamps'
            )
            log_queue.put(None)
            log_queue.close()

            return_code_queue.put({
                'return_code': 'exception',
                'return_value': 'stringify_timestamps'
            })
            return_code_queue.close()

            return
    else:
        timestamp_strings = None
        qa_flags = None

    logging.debug('attempting to generate reports')

    try:
        start = time()

        IO.write_inference_report(video_file_name, output_dir_path,
                                  probability_array, class_name_map,
                                  timestamp_strings, qa_flags, do_smooth_probs,
                                  smoothing_factor, do_binarize_probs)

        end = time() - start

        processing_duration = IO.get_processing_duration(
            end, 'generated inference reports in')
        logging.info(processing_duration)
    except Exception as e:
        logging.error(
            'encountered an unexpected error while generating inference report.'
        )
        logging.error(e)

        logging.debug(
            'will exit with code: exception and value: write_inference_report')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'write_inference_report'
        })
        return_code_queue.close()

        return

    try:
        start = time()

        if do_smooth_probs:
            probability_array = IO.smooth_probs(probability_array,
                                                smoothing_factor)

        frame_numbers = [i + 1 for i in range(len(probability_array))]

        if timestamp_strings is not None:
            timestamp_strings = timestamp_strings.astype(np.int32)

        trip = Trip(frame_numbers, timestamp_strings, qa_flags,
                    probability_array, class_name_map)

        work_zone_events = trip.find_work_zone_events()

        if len(work_zone_events) > 0:
            logging.info('{} work zone events were found in {}'.format(
                len(work_zone_events), video_file_name))

            IO.write_event_report(video_file_name, output_dir_path,
                                  work_zone_events)
        else:
            logging.info(
                'No work zone events were found in {}'.format(video_file_name))

        end = time() - start

        processing_duration = IO.get_processing_duration(
            end, 'generated event reports in')
        logging.info(processing_duration)
    except Exception as e:
        logging.error(
            'encountered an unexpected error while generating event report.')
        logging.error(e)

        logging.debug(
            'will exit with code: exception and value: write_event_report')
        log_queue.put(None)
        log_queue.close()

        return_code_queue.put({
            'return_code': 'exception',
            'return_value': 'write_event_report'
        })
        return_code_queue.close()

        return

    logging.debug('will exit with code: success and value: {}'.format(
        num_analyzed_frames))
    log_queue.put(None)
    log_queue.close()

    return_code_queue.put({
        'return_code': 'success',
        'return_value': num_analyzed_frames,
        'analysis_duration': analysis_duration
    })
    return_code_queue.close()
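
process_video hands ffmpeg_command and frame_shape to VideoAnalyzer, whose frame reader is not shown in this excerpt. As a rough sketch only, reading fixed-size rgb24 frames from the rawvideo pipe that ffmpeg_command describes could be done as below; the function name and subprocess handling are illustrative assumptions, not SNVA's actual reader.

import subprocess

import numpy as np

def iter_raw_frames(ffmpeg_command, frame_shape):
    # rgb24 output means each frame occupies height * width * channels bytes
    frame_num_bytes = int(np.prod(frame_shape))

    process = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE)

    try:
        while True:
            frame_bytes = process.stdout.read(frame_num_bytes)

            if len(frame_bytes) < frame_num_bytes:
                break  # end of stream (or a truncated final read)

            yield np.frombuffer(frame_bytes, dtype=np.uint8).reshape(frame_shape)
    finally:
        process.stdout.close()
        process.wait()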
Example #19
File: model.py Project: iyush/manta
    def fit(self, x, y, batch_size) -> None:
        # first we need to convert to tensorflow tensors
        x = tf.convert_to_tensor(x, dtype=tf.float32)
        y = tf.convert_to_tensor(y, dtype=tf.float32)

        # if self.model_exists:
        #     IO.debug("model already exists, so no need to train!")
        #     return

        self.model.summary()

        # Prepare the validation dataset.
        # Reserve the last 20% of the samples for validation.
        # The validation set and the training set should be disjoint sets.
        validation_split = int(0.2 * x.shape[0])

        x_val = x[-validation_split:]
        y_val = y[-validation_split:]
        val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
        val_dataset = val_dataset.batch(64)

        x_train = x[:-validation_split]
        y_train = y[:-validation_split]

        # divide into training batches of size batch_size
        train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
        train_dataset = train_dataset.shuffle(
            buffer_size=1024).batch(batch_size)

        epochs = 600

        # training loop; a model checkpoint is written at the end of each epoch
        for epoch in range(epochs):
            time_start = time.time_ns()
            # train over batches
            loss_value = 0
            for step, (x_batch_train,
                       y_batch_train) in enumerate(train_dataset):
                loss = self.train_step(neighbourhood=x_batch_train,
                                       pressure=y_batch_train).numpy()
                if step % 100 == 0:
                    IO.debug("Epoch: {:03d} \t Loss: {:01.5f}".format(
                        epoch, loss),
                             end="\r")
                loss_value += loss

            # get validation error
            for (val_x, val_y) in val_dataset:
                predicted_pressure = self.model(val_x, training=False)
                self.validation_metric.update_state(val_y, predicted_pressure)

            # display loss and validation_error information
            val_error = self.validation_metric.result()
            # step is the zero-based index of the last batch, so the epoch mean
            # divides by step + 1
            debug_info = "{} Epoch: {:03d} \t Loss: {:01.5f} \t Validation error {:01.5f}"
            IO.debug(
                debug_info.format(
                    datetime.datetime.now().strftime("%Y/%m/%d-%H:%M:%S"),
                    epoch, loss_value / (step + 1), val_error))

            # save the information to the tensorboard
            with self.val_summary_writer.as_default():
                tf.summary.scalar("epoch_loss", val_error.numpy(), step=epoch)
            with self.train_summary_writer.as_default():
                tf.summary.scalar("epoch_loss",
                                  loss_value / (step + 1),
                                  step=epoch)

            # reset the validation metric and loss at the end of each epoch
            self.validation_metric.reset_states()

            # save the model at the end of each epoch, to the file given by checkpoint_path
            checkpoint_path = self.model_path + "/weights.epoch{:04d}.hdf5".format(
                epoch)
            self.model.save_weights(checkpoint_path)

            time_end = time.time_ns()
            print("Epoch took: {:01.5f} minutes.".format(
                (time_end - time_start) / (1e9 * 60)))
            print("Saved model in: {} ".format(checkpoint_path))
        return None
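
fit() above delegates the gradient update to self.train_step(neighbourhood=..., pressure=...), which is not included in this excerpt. A minimal sketch of what such a step could look like, assuming the model is trained with a mean-squared-error objective against an optimizer stored on self (both assumptions), is shown below.

    @tf.function
    def train_step(self, neighbourhood, pressure):
        # illustrative only: the project's actual loss and optimizer may differ
        with tf.GradientTape() as tape:
            predicted_pressure = self.model(neighbourhood, training=True)
            loss = tf.reduce_mean(tf.square(pressure - predicted_pressure))

        gradients = tape.gradient(loss, self.model.trainable_weights)
        self.optimizer.apply_gradients(zip(gradients, self.model.trainable_weights))

        return loss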