Example #1
def main(args):
    log_handler(LOGGER)
    LOGGER.info(args)
    gpu_candidate = args.gpus.split(',')

    # prepare arguments
    root_path = Path(args.root)
    outdir = Path(args.outdir)
    video_paths = list(root_path.glob('**/*.avi'))

    gpu_queue = Queue()
    for i in gpu_candidate:
        gpu_queue.put(i)

    # multithreading
    th_jobs = []
    all_th_args = zip([gpu_queue] * len(video_paths),
                      video_paths,
                      [outdir] * len(video_paths),
                      [args.option] * len(video_paths))

    for th_args in all_th_args:
        job = Thread(target=demo_one_video, args=th_args)
        th_jobs.append(job)
        job.start()
    for j in th_jobs:
        j.join()
    LOGGER.info('Completed multithreaded processing pipeline')
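The worker demo_one_video is referenced but not defined in this example. A minimal sketch of a compatible worker, assuming each job borrows a GPU id from the shared queue and returns it when done:

def demo_one_video(gpu_queue, video_path, outdir, option):
    """Hypothetical worker matching the thread arguments above."""
    gpu_id = gpu_queue.get()  # borrow a GPU id; blocks until one is free
    try:
        LOGGER.info('processing %s on GPU %s (option=%s)', video_path, gpu_id, option)
        # ... run the actual per-video pipeline here ...
    finally:
        gpu_queue.put(gpu_id)  # return the GPU id so the next job can pick it up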
Example #2
def main(args: argparse.Namespace):
    """an interface to activate pyqt5 app"""
    logger = logging.getLogger(__name__)
    log_handler(logger)
    logger.info(args)
    with open(args.config, 'r') as config_file:
        config = yaml.safe_load(config_file)

    video_path = Path(args.video)
    output_path = Path('outputs')
    if not output_path.exists():
        output_path.mkdir(parents=True)
    label_path = output_path / '{}_label.csv'.format(video_path.stem)
    if args.output:
        label_path = output_path / Path(args.output)
    if not label_path.parent.exists():
        label_path.parent.mkdir(parents=True)

    app = QApplication(sys.argv)
    video_app = VideoApp(args.video, str(label_path), **config)
    try:
        log_handler(video_app.logger)
        app.exec()
    except Exception as e:
        logger.exception(e)
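The argument parser for these flags is not included in the snippet. A plausible sketch, assuming --config, --video, and --output as used above:

import argparse

def parse_args() -> argparse.Namespace:
    # hypothetical parser; flag names are inferred from the attribute accesses above
    parser = argparse.ArgumentParser(description='PyQt5 video labeling app')
    parser.add_argument('--config', required=True, help='path to a YAML config')
    parser.add_argument('--video', required=True, help='path to the input video')
    parser.add_argument('--output', default=None, help='optional label CSV name')
    return parser.parse_args()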
Example #3
def main(args):
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
    logger = logging.getLogger(__name__)
    log_handler(logger, logging.getLogger('src.utils'))
    logger.info(args)

    # prepare data
    X_test = list(Path(args.test).glob('*/*'))
    y_true = [path.parent.name for path in X_test]
    y_true = list(map(int, y_true))
    y_true = np_utils.to_categorical(y_true)
    X_test = np.array([
        np.array(Image.open(i).resize(args.input_shape), dtype='float32')
        for i in X_test
    ])
    X_test = X_test / 255
    logger.info('X_test shape={}, Y_test shape={}'.format(
        X_test.shape, y_true.shape))

    # models
    for model_path in args.models:
        K.clear_session()
        if args.focal_loss:
            model = load_model(
                model_path,
                custom_objects={'focal_loss_fixed': focal_loss(2, 2)})
        else:
            model = load_model(model_path)

        # predict
        y_pred = model.predict(X_test)
        savepath_y_pred = str(Path(model_path).with_name('y_pred.npy'))
        np.save(savepath_y_pred, y_pred)

        # metrics
        accuracy = top_n_accuracy(y_pred, y_true, 1)
        cnf_matrix = confusion_matrix(y_true.argmax(axis=1),
                                      y_pred.argmax(axis=1))
        savepath_cnf_matrix = str(
            Path(model_path).with_name('confusion_matrix.jpg'))
        plot_confusion_matrix(
            cnf_matrix,
            classes=list('OX=A'),
            to_img=savepath_cnf_matrix,
            normalize=True,
            title='Confusion matrix (acc={:.4f})'.format(accuracy))
        logger.info('model: {}'.format(model_path))
        logger.info(' - top-1-accuracy: {:.4f}'.format(accuracy))
        logger.info(
            ' - save confusion matrix at {}'.format(savepath_cnf_matrix))
        logger.info(' - save y_pred at {}'.format(savepath_y_pred))
        print(
            classification_report(y_true.argmax(axis=1),
                                  y_pred.argmax(axis=1),
                                  target_names=list('OX=A')))
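top_n_accuracy is referenced but not defined here. A plausible implementation for one-hot y_true and probability y_pred arrays (an assumption about its contract):

import numpy as np

def top_n_accuracy(y_pred: np.ndarray, y_true: np.ndarray, n: int) -> float:
    # hypothetical: fraction of samples whose true class is among the
    # n highest-scoring predictions
    top_n = np.argsort(y_pred, axis=1)[:, -n:]
    true_idx = y_true.argmax(axis=1)  # one-hot -> class index
    return float(np.mean([t in row for t, row in zip(true_idx, top_n)]))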
Example #4
def main(args):
    logger = logging.getLogger(__name__)
    log_handler(logger, logging.getLogger('src.visualize'))
    logger.info(args)
    record_path = Path(args.record)
    save_video_path = record_path.parent / f'{record_path.stem}_{args.options}.avi'
    save_video_path = save_video_path if args.save_video else None

    with open(args.config, 'r') as f:
        config = json.load(f)['outputs']
    mouse_contours = None
    if args.mouse_contours:
        with open(args.mouse_contours, 'r') as f:
            mouse_contours = json.load(f)
    if args.save_video:
        if not save_video_path.parent.exists():
            save_video_path.parent.mkdir(parents=True)
        save_video_path = str(save_video_path)

    if args.options in ['detection', 'classification']:
        with open(args.record, 'r') as f:
            records = f.readlines()
            records = [eval(line) for line in records]
        df = convert_to_dataframe(records, args.options)
        show_and_save_video(args.video,
                            df,
                            config,
                            from_=args.from_idx,
                            show_video=args.show_video,
                            save_video=save_video_path,
                            pause_flag=args.pause)
    elif args.options == 'track':
        df = pd.read_csv(args.record)
        show_and_save_video(args.video,
                            df,
                            config,
                            from_=args.from_idx,
                            mouse_contours=mouse_contours,
                            show_video=args.show_video,
                            save_video=save_video_path,
                            pause_flag=args.pause)
    elif args.options == 'action':
        df = pd.read_csv(args.record)
        if Path(args.record).name != 'fixed_action_paths.csv':
            df = convert_to_dataframe(df, args.options)
            df.to_csv(Path(args.record).with_name('fixed_action_paths.csv'))
        show_and_save_video(args.video,
                            df,
                            config,
                            from_=args.from_idx,
                            show_video=args.show_video,
                            save_video=save_video_path,
                            pause_flag=args.pause)
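The detection/classification branch parses each record line with eval. If the lines contain only Python literals, ast.literal_eval is a safer drop-in (a suggestion, not the source's code):

import ast

with open(args.record, 'r') as f:
    records = [ast.literal_eval(line) for line in f]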
Example #5
def main(args: argparse.Namespace):
    # setting log
    out_dir_path = Path(args.outdir)
    log_name = f"{datetime.now().strftime('%m%dT%H%M%S')}-{datetime.now().microsecond}.log"
    log_path = out_dir_path / log_name
    if not out_dir_path.exists():
        out_dir_path.mkdir(parents=True)
    logger = logging.getLogger(__name__)
    log_handler(logger, logname=log_path if args.islog else None)
    logger.info(args)

    # run pyqt5
    app = QApplication(sys.argv)
    tracker = KalmanFilterTracker()
    log_handler(tracker.logger)
    app.exec()
Example #6
def main(args: argparse.Namespace):
    """optical flow interface

    Arguments:
        args {argparse.Namespace} -- parameters parsed from the terminal
    """
    # config
    logger = logging.getLogger(__name__)
    log_handler(logger)
    logger.info(args)
    with open(CONFIG_FILE) as config_file:
        config = yaml.safe_load(config_file)

    # demo code for optical flow testing
    with Video(args.video) as video:
        prev_frame = video.read_frame(0)
        prev_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)

        # limit the feature area
        pos_0 = cv2.goodFeaturesToTrack(prev_gray[:300, :400],
                                        mask=None,
                                        **config['shitomasi'])
        mask = np.zeros_like(prev_frame)

        while True:
            frame_img = video.read_frame(video.frame_idx + 1)
            if frame_img is None:  # assume read_frame returns None past the last frame
                break
            frame_gray = cv2.cvtColor(frame_img, cv2.COLOR_BGR2GRAY)

            # calc optical flow
            pos_1, status, err = cv2.calcOpticalFlowPyrLK(prev_gray, frame_gray,
                                                          pos_0, None, **config['lk'])
            good_new = pos_1[status == 1]
            good_old = pos_0[status == 1]

            for new, old in zip(good_new, good_old):
                # newer OpenCV builds require integer pixel coordinates
                pt1 = tuple(int(v) for v in new.ravel())
                pt2 = tuple(int(v) for v in old.ravel())
                mask = cv2.line(mask, pt1, pt2, (0, 255, 0), 2)
                frame_img = cv2.circle(frame_img, pt1, 5, (0, 0, 255), -1)
            img = cv2.add(frame_img, mask)
            cv2.imshow('frame', img)
            k = cv2.waitKey(1)
            if k in [27, ord('q')]:
                cv2.destroyAllWindows()
                break
            prev_gray = frame_gray.copy()
            pos_0 = good_new.reshape(-1, 1, 2)
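CONFIG_FILE itself is not shown; the keys shitomasi and lk must hold keyword arguments for cv2.goodFeaturesToTrack and cv2.calcOpticalFlowPyrLK respectively. A plausible shape with commonly used values (an assumption, not the source's settings):

config = {
    'shitomasi': dict(maxCorners=100, qualityLevel=0.3,
                      minDistance=7, blockSize=7),
    'lk': dict(winSize=(15, 15), maxLevel=2,
               criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)),
}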
Example #7
def main(args):
    log_handler(LOGGER, logging.getLogger('src.utils'))
    LOGGER.info(args)

    # prepare data
    func_set = {
        'avg': ensemble_avg,
        'vote': ensemble_vote
    }
    X_test = list(Path(args.ground_truth).glob('*/*'))
    y_true = [path.parent.name for path in X_test]
    y_true = np.array(list(map(int, y_true)))
    if args.predict_all:
        y_preds = Path(args.predictions_dir).glob('*/y_pred.npy')
        y_preds = list(map(str, y_preds))
        for combine_length in range(2, len(y_preds)):
            _checkpoint = (None, 0)
            for combine_set in combinations(y_preds, combine_length):
                local_y_preds = np.array([np.load(i) for i in combine_set])
                local_y_pred = func_set.get(args.method)(local_y_preds)
                # sklearn's accuracy_score takes (y_true, y_pred); normalize is keyword-only
                accuracy = accuracy_score(y_true, local_y_pred)
                if _checkpoint[1] < accuracy:
                    _checkpoint = (combine_set, accuracy)
            print('Limit len {:02d} - combination={}, acc={:.4f}'.format(
                combine_length, sorted(map(lambda x: x.split('/')[1], _checkpoint[0])), _checkpoint[1]
            ))
    else:
        y_preds = np.array([np.load(i) for i in args.predictions])
        LOGGER.info('predictions shape - {}'.format(y_preds.shape))
        y_pred = func_set.get(args.method)(y_preds)
        LOGGER.info('ensemble predictions shape - {}'.format(y_pred.shape))

        # metrics
        accuracy = accuracy_score(y_true, y_pred)
        cnf_matrix = confusion_matrix(y_true, y_pred)
        savepath_cnf_matrix = str(Path('.') / 'ensemble_confusion_matrix.jpg')
        plot_confusion_matrix(cnf_matrix,
                              classes=list('OX=A'),
                              to_img=savepath_cnf_matrix,
                              normalize=True,
                              title='Confusion matrix (acc={:.4f})'.format(accuracy))
        LOGGER.info('Ensemble top-1-accuracy - {:.4f}'.format(accuracy))
        print(classification_report(y_true, y_pred, target_names=list('OX=A')))
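ensemble_avg and ensemble_vote are imported from elsewhere. Plausible implementations for stacked predictions of shape (n_models, n_samples, n_classes) — an assumption about their contract:

import numpy as np

def ensemble_avg(y_preds: np.ndarray) -> np.ndarray:
    # average class probabilities across models, then take the best class
    return y_preds.mean(axis=0).argmax(axis=1)

def ensemble_vote(y_preds: np.ndarray) -> np.ndarray:
    # each model votes with its argmax class; majority vote per sample
    votes = y_preds.argmax(axis=2)
    return np.apply_along_axis(lambda v: np.bincount(v).argmax(), 0, votes)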
Example #8
def main(args):
    logger = logging.getLogger(__name__)
    log_handler(logger)
    logger.info(args)

    outdir = Path(__file__).parent / '../output/path'
    if not outdir.exists():
        logger.info('{} not found, created.'.format(str(outdir)))
        outdir.mkdir(parents=True)

    observer_filepath = Path(args.observer)
    paths_filepath = Path(args.paths)
    fixed_observer_savepath = paths_filepath.parent / 'fixed_observer.csv'

    # get fps from video
    cap = cv2.VideoCapture(args.video)
    fps = cap.get(cv2.CAP_PROP_FPS)
    cap.release()
    logger.info('video fps = {}'.format(fps))

    # clean observer file
    df_observer = pd.read_csv(observer_filepath)
    df_observer = df_observer[[
        'Time_Relative_hmsf', 'Duration_sf', 'Subject', 'Behavior',
        'Modifier_1', 'Event_Type'
    ]]
    df_observer = df_observer.dropna(subset=['Modifier_1'])
    df_observer.columns = [
        'timestamp_hmsf', 'duration', 'subject',
        'behavior', 'target', 'event_type'
    ]
    df_observer['label'] = df_observer.apply(
        lambda row: row['subject'].split(' ')[-1], axis=1)
    df_observer['timestamp_hmsf'] = pd.to_datetime(
        df_observer['timestamp_hmsf'])
    df_observer['timestamp_hmsf'] = [
        t.time() for t in df_observer['timestamp_hmsf']
    ]
    df_observer.to_csv(fixed_observer_savepath, index=False)
    logger.info('Cleaned and saved the observer file at {}'.format(
        fixed_observer_savepath))

    # padding the records in duration
    df_padding_event = df_observer.copy().reset_index(drop=True)
    df_padding_event['frame_idx'] = df_padding_event.apply(
        lambda x: hmsf_to_idx(x['timestamp_hmsf'], fps), axis=1)
    for index, row in df_observer.iterrows():
        if row['event_type'] == 'State start':
            logger.info('From row {:02} - {}'.format(index, row.tolist()))
            for end_index, end_row in df_observer.loc[index + 1:].iterrows():
                check_end_row = end_row['subject':'target'].tolist()
                check_row = row['subject':'target'].tolist()
                check_same_event = (check_end_row == check_row)
                check_same_event = check_same_event and end_row[
                    'event_type'] == 'State stop'
                if check_same_event:

                    # create the DataFrame that pads the missing records per event;
                    # date_range is built with millisecond frequency
                    time1 = datetime.datetime.combine(datetime.date.min,
                                                      row['timestamp_hmsf'])
                    time2 = datetime.datetime.combine(
                        datetime.date.min, end_row['timestamp_hmsf'])
                    delta = time2 - time1
                    ts = pd.date_range(str(row['timestamp_hmsf']),
                                       periods=delta.total_seconds() * 1000,
                                       freq='L',
                                       closed='right')
                    ts = [t.time() for t in ts.tolist()]
                    ts_data = {
                        'timestamp_hmsf': ts,
                        'duration': [0.0] * len(ts),
                        'subject': [row['subject']] * len(ts),
                        'behavior': [row['behavior']] * len(ts),
                        'target': [row['target']] * len(ts),
                        'event_type': ['State processing'] * len(ts),
                        'label': [row['label']] * len(ts)
                    }
                    df_ts = pd.DataFrame(data=ts_data)
                    df_ts['frame_idx'] = df_ts.apply(
                        lambda x: hmsf_to_idx(x['timestamp_hmsf'], fps), axis=1)
                    df_ts.drop_duplicates(subset=['frame_idx'],
                                          keep='first',
                                          inplace=True)
                    df_padding_event = pd.concat([df_padding_event, df_ts],
                                                 sort=False)
                    logger.info('To row {:02} - {}\n{}'.format(
                        end_index, end_row.tolist(), '-' * 87))
                    break

    df_padding_event.reset_index(drop=True, inplace=True)
    df_padding_event.drop_duplicates(subset=['frame_idx', 'label'],
                                     keep='first',
                                     inplace=True)
    logger.info('Completed the padding dataframe, shape={}'.format(
        df_padding_event.shape))

    # join action and path
    # the observer file is precise to milliseconds, so timestamp_ms should have
    # the microsecond part removed
    df_paths = pd.read_csv(paths_filepath)
    df_final = pd.merge(df_paths,
                        df_padding_event,
                        how='left',
                        on=['frame_idx', 'label'])
    df_final = add_group_id(df_final,
                            'block_idx',
                            'behavior',
                            'target',
                            gid_colname='action_idx')
    logger.info('paths file shape={}'.format(df_paths.shape))
    logger.info('Completed merge to the final dataframe, shape={}'.format(
        df_final.shape))

    # save final result
    action_path_filepath = paths_filepath.parent / 'action_paths.csv'
    df_final.to_csv(action_path_filepath, index=False)
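hmsf_to_idx is referenced but not defined in this snippet. A plausible implementation mapping a datetime.time offset from midnight to a frame index at the given fps (an assumption):

import datetime

def hmsf_to_idx(t: datetime.time, fps: float) -> int:
    # hypothetical: time-of-day -> seconds -> frame index
    seconds = t.hour * 3600 + t.minute * 60 + t.second + t.microsecond / 1e6
    return int(round(seconds * fps))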
Example #9
def main(args):
    logdir = Path('logs')
    outdir = Path(args.outdir)
    trackpath_dir = outdir / Path(args.video).stem
    if not logdir.exists():
        logdir.mkdir(parents=True)
    if not trackpath_dir.exists():
        trackpath_dir.mkdir(parents=True)

    logger = logging.getLogger(__name__)
    log_handler(logger,
                *LOGGERS,
                logname=str(logdir / args.log) if args.log else None)
    logger.info(args)

    trackflow = build_flow(args.video, args.input, args.config)

    # get timestamp and save path to file
    video_filename = str(Path(args.video).stem)
    cap = cv2.VideoCapture(args.video)
    fps = cap.get(cv2.CAP_PROP_FPS)
    total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    cap.release()

    trackflow_all_label = []
    label_col = sorted(trackflow.ref_keys)
    for label, flow in trackflow.paths.items():
        logger.info('get timestamp and convert {}'.format(label))
        for bbox in tqdm(flow):
            bbox.timestamp = bbox.frame_idx / fps * 1000
            label_score = [
                bbox.multiclass_result.get(l, None) for l in label_col
            ]
            trackflow_all_label.append([
                video_filename, fps, total_frames, bbox.frame_idx,
                bbox.block_id, bbox.timestamp, label, bbox.confidence,
                *label_score, bbox.block_confidence, *bbox.pt1, *bbox.pt2,
                *bbox.center, *bbox_behavior_encoding(bbox.behavior)
            ])

    trackflow_all_label = sorted(trackflow_all_label, key=lambda x: x[0])
    label_col = ['{}_score'.format(l) for l in label_col]
    trackflow_cols = [
        'video_name', 'video_fps', 'video_nframes', 'frame_idx', 'block_idx',
        'timestamp_ms', 'label', 'detect_score', *label_col, 'block_score',
        'pt1.x', 'pt1.y', 'pt2.x', 'pt2.y', 'center.x', 'center.y', 'on_mouse'
    ]
    path_savepath = str(trackpath_dir / 'paths.csv')
    df_paths = pd.DataFrame(trackflow_all_label, columns=trackflow_cols)
    df_paths.to_csv(path_savepath, index=False)

    # save mouse contours
    mouse_filename = '{}_mouse.json'.format(Path(args.video).stem)
    mouse_cnts_filepath = str(trackpath_dir / mouse_filename)
    mouse_cnts = {
        str(m.frame_idx): {
            'contour': m.contour.tolist(),
            'contour_extend': m.contour_extend.tolist(),
            'contour_extend_kernel': m.contour_extend_kernel.tolist(),
            'center': m.center.tolist()
        }
        for m in trackflow.mouse_cnts
    }
    with open(mouse_cnts_filepath, 'w+') as f:
        json.dump(mouse_cnts, f)

    # show and/or save video
    video_savepath = None
    if args.save_video:
        video_savepath = str(trackpath_dir / args.outvideo)
    if args.show_video or args.save_video:
        with open(args.config, 'r') as f:
            config = json.load(f)
            show_and_save_video(args.video,
                                df_paths,
                                config['outputs'],
                                from_=args.from_idx,
                                show_video=args.show_video,
                                save_video=video_savepath,
                                pause_flag=args.pause)
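bbox_behavior_encoding is not shown; given the single on_mouse column it feeds, a minimal sketch assuming behavior is a dict of boolean flags:

def bbox_behavior_encoding(behavior: dict) -> list:
    # hypothetical: one binary column per behavior, here only 'on_mouse'
    return [int(bool(behavior.get('on_mouse', False)))]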
Example #10
def main(args):
    # preprocess
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(args.gpus)
    outdir = Path('outputs') / args.name
    if not outdir.exists():
        outdir.mkdir(parents=True)
    log_handler(*LOGGERS, logname=str(outdir / 'log.txt'))

    if not Path(args.config).exists():
        raise FileNotFoundError('config not found: {}'.format(args.config))
    with open(args.config, 'r') as f:
        config = json.load(f)
    LOGGER.info(args)

    # build and compile model
    K.clear_session()
    model = get_model(args, config)
    if len(args.gpus) > 1:
        model.model = multi_gpu_model(model.model, gpus=len(args.gpus))
    optimizer = get_optimizer(config[args.backend]['optimizer'],
                              lr=config[args.backend]['lr'])
    loss = focal_loss(gamma=2, alpha=2)
    model.model.compile(loss=[loss],
                        optimizer=optimizer,
                        metrics=[config[args.backend]['metrics']])
    estimate_gbytes = get_model_memory_usage(config[args.backend]['bz'],
                                             model.model)
    model.model.summary(print_fn=lambda x: LOGGER.info(x + '\n'))
    LOGGER.info(
        'Estimated model requires {} GB of GPU memory'.format(estimate_gbytes))

    # preprocess generator
    _data_gen_params = {
        'target_size': config[args.backend]['keras']['input_shape'][:2],
        'batch_size': config[args.backend]['bz'],
        'class_mode': 'categorical',
        'shuffle': True
    }
    count_train_data = len(list(Path(args.train).glob('**/*')))
    count_test_data = len(list(Path(args.test).glob('**/*')))
    train_data_aug = ImageDataGenerator(**config['imgaug']['train'])
    test_data_aug = ImageDataGenerator(**config['imgaug']['test'])
    train_batch = train_data_aug.flow_from_directory(args.train,
                                                     **_data_gen_params)
    test_batch = test_data_aug.flow_from_directory(args.test,
                                                   **_data_gen_params)
    train_data_gen = normalize_generator(train_batch)
    test_data_gen = normalize_generator(test_batch)
    LOGGER.info(
        'Completed generator preprocessing with {} training and {} test samples'.
        format(count_train_data, count_test_data))

    # callbacks
    model_savepath = str(outdir / '{}.h5'.format(args.backend))
    checkpoint = ModelCheckpoint(model_savepath,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')
    earlystop = EarlyStopping(monitor='val_loss',
                              min_delta=0,
                              patience=25,
                              verbose=1,
                              mode='auto')
    reducelr = ReduceLROnPlateau(monitor='val_loss',
                                 factor=0.8,
                                 patience=5,
                                 min_lr=1e-10,
                                 verbose=1)
    tensorboard = TensorBoard(log_dir=str(outdir / 'tensorboard'),
                              write_grads=True,
                              write_images=True,
                              write_graph=False)
    LOGGER.info('Completed callback declaration')

    # train
    history = model.model.fit_generator(
        train_data_gen,
        steps_per_epoch=count_train_data // config[args.backend]['bz'],
        epochs=config[args.backend]['epochs'],
        validation_data=test_data_gen,
        validation_steps=count_test_data // config[args.backend]['bz'],
        callbacks=[checkpoint, earlystop, reducelr, tensorboard])

    # save model config
    history_dataframe = pd.DataFrame(history.history)
    history_dataframe.to_csv(str(outdir / 'history.csv'), index=False)
    record_config = model.config
    record_config['imgaug'] = config['imgaug']
    if args.outimg:
        plot_model(model.model, to_file=str(outdir / 'model_structure.jpg'))
    with open(str(outdir.parent / 'README.txt'), 'a+') as f:
        f.write('{}\t-\t{}\n'.format(args.name, args.comment))
    with open(str(outdir / 'config.json'), 'w+') as f:
        json.dump(record_config, f, indent=4)
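normalize_generator is referenced but not defined here. A plausible wrapper that rescales ImageDataGenerator batches to [0, 1] (an assumption about its behavior):

def normalize_generator(batch_iter):
    # hypothetical: divide each image batch by 255, pass labels through
    for x, y in batch_iter:
        yield x / 255.0, y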
Example #11
def main(args: argparse.Namespace):
    """[summary]
    an interface to handle input and parse to different method

    Arguments:
        args {argparse.Namespace} -- parameter parse fomr terminal
    """
    # config
    logger = logging.getLogger(__name__)
    log_handler(logger, logging.getLogger(Video.__name__))  # Video.__class__.__name__ of a class is just 'type'
    logger.info(args)
    with open(CONFIG_FILE) as config_file:
        config = yaml.safe_load(config_file)

    # test single frame from video and the conventional find contour method
    with Video(args.input) as video:

        # multiprocessing calc the contour
        pending_frame_idx = list(
            range(0, video.frame_count, config['general']['skip_per_nframe']))
        logger.info('process pending frame index: %d', len(pending_frame_idx))
        logger.info('cpu count: %d (will used %d)', cpu_count(), (cpu_count()*3//4))
        with Pool(processes=(cpu_count()*3//4)) as pool:
            # basic contours
            mp_args = zip([args.input] * len(pending_frame_idx),
                          pending_frame_idx,
                          [config] * len(pending_frame_idx),
                          [args.preprocess] * len(pending_frame_idx),
                          [args.option] * len(pending_frame_idx))
            mp_targets = pool.starmap_async(get_contour_from_video, mp_args)
            mp_targets = mp_targets.get()
            mp_targets = [DetectionTarget(*i) for i in mp_targets if i]
            mp_targets = sorted(mp_targets, key=lambda x: x.frame_idx)
            _basic_target_counts = len(mp_targets)

            # interpolate contours
            _check_frame_len = 0
            while True:
                # find the new pending index
                pair_targets = [(mp_targets[i], mp_targets[i + 1])
                                for i in range(len(mp_targets) - 1)]
                check_frame_idx = [
                    i.is_shifting(j, config['general']['tolerable_shifting_dist'])
                    for i, j in pair_targets
                ]
                if len(check_frame_idx) == _check_frame_len:
                    break
                _check_frame_len = len(check_frame_idx)
                logger.info('check_frame_idx len=%d (all=%d), any=%s',
                            len(check_frame_idx), video.frame_count,
                            any(check_frame_idx))
                if not any(check_frame_idx):
                    break
                pending_frame_idx = list(compress(pair_targets, check_frame_idx))
                pending_frame_idx = [(i.frame_idx + j.frame_idx) // 2
                                     for i, j in pending_frame_idx
                                     if i.frame_idx + 1 < j.frame_idx]

                # multiprocessing calc
                mp_args = zip([args.input] * len(pending_frame_idx),
                              pending_frame_idx,
                              [config] * len(pending_frame_idx),
                              [args.preprocess] * len(pending_frame_idx),
                              [args.option] * len(pending_frame_idx))
                mp_interpolate_targets = pool.starmap_async(get_contour_from_video, mp_args)
                mp_interpolate_targets = mp_interpolate_targets.get()
                mp_targets += [DetectionTarget(*i) for i in mp_interpolate_targets if i]
                mp_targets = sorted(mp_targets, key=lambda x: x.frame_idx)

            logger.info('#contours after interpolate: %d -> %d',
                        _basic_target_counts, len(mp_targets))
            video.detect_targets += mp_targets
        video.save(args.savepath, draw_cnts=True)
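DetectionTarget.is_shifting is not defined here; from its use it should report whether two consecutive targets moved farther than a tolerable distance. A sketch assuming each target exposes a center point (names are assumptions):

import math

def is_shifting(self, other, tolerable_dist: float) -> bool:
    # hypothetical: Euclidean distance between consecutive target centers
    (x1, y1), (x2, y2) = self.center, other.center
    return math.hypot(x2 - x1, y2 - y1) > tolerable_dist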
Example #12
def main(args):
    logger = logging.getLogger(__name__)
    log_handler(logger)
    logger.info(args)

    # setting GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.5
    K.tensorflow_backend.set_session(tf.Session(config=config))

    # prepare video and detection results
    video_path = Path(args.video)
    cap = cv2.VideoCapture(str(video_path))
    with open(args.input, 'r') as f:
        bbox_results = f.readlines()
        bbox_results = [eval(b.rstrip('\n')) for b in bbox_results]

    # prepare model predict results
    y_preds = {}
    for model_path in args.models:
        logger.info('model - {}'.format(model_path))
        K.clear_session()
        model = load_model(
            model_path, custom_objects={'focal_loss_fixed': focal_loss(2, 2)})
        y_preds[model_path] = []

        for record in tqdm(bbox_results):
            frame_idx, bboxes = record
            model_predict_bbox = deepcopy(bboxes)
            cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx - 1)
            success, frame = cap.read()
            if success:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                for bbox_idx, bbox in enumerate(model_predict_bbox):
                    # clamp coordinates to the frame; bbox[-1] is the detector score
                    y1, x1, y2, x2 = [max(int(i), 0) for i in bbox[:4]]
                    prob = bbox[-1]
                    crop_img = frame[y1:y2, x1:x2, ...]
                    img_shape = tuple(model.input.shape.as_list()[1:3])
                    img = cv2.resize(crop_img,
                                     img_shape,
                                     interpolation=cv2.INTER_AREA)
                    img = img[np.newaxis, ...]
                    img = img / 255.
                    y_pred = model.predict(img)
                    model_predict_bbox[bbox_idx].append({
                        'O': y_pred[0][0],
                        'X': y_pred[0][1],
                        '=': y_pred[0][2],
                        'A': y_pred[0][3]
                    })
            else:
                logger.error('Failed to read frame at index {}'.format(frame_idx))
            y_preds[model_path].append([frame_idx, model_predict_bbox])
        logger.info('Complete model predict - {}'.format(model_path))

    # ensemble result
    reference_result = list(y_preds.keys())[0]
    reference_result = y_preds[reference_result]
    savepath = Path(args.input)
    savepath = savepath.parent / '{}_ensemble.txt'.format(
        str(Path(args.video).stem))
    with open(str(savepath), 'w') as f:
        for idx in tqdm(range(len(reference_result))):
            frame_idx, bboxes = reference_result[idx]
            merge_bboxes = []
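            # record layout: y_preds[model][idx] == [frame_idx, bboxes], and each
            # bbox ends with a dict of class scores, so v[idx][-1][bbox_idx][-1][k]
            # reads the class-k score of this bbox from model v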

            for bbox_idx, bbox in enumerate(bboxes):
                y_pred = np.array([
                    [v[idx][-1][bbox_idx][-1][k] for v in y_preds.values()]
                    for k in bbox[-1].keys()
                ])
                y_pred = np.average(y_pred, axis=-1)
                merge_bboxes.append([
                    *bbox[:-1],
                    {k: y_pred[i] for i, k in enumerate(bbox[-1].keys())}
                ])

            f.write(str([frame_idx, merge_bboxes]))
            if idx != len(reference_result) - 1:
                f.write('\n')
    logger.info('Save final result at {}'.format(str(savepath)))
    cap.release()
Example #13
def main(args):
    logger = logging.getLogger(__name__)
    log_handler(logger)
    logger.info(args)

    # setting GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
    num_gpus = len(args.gpus.split(','))
    available_gpus = get_available_gpus()
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.5
    K.tensorflow_backend.set_session(tf.Session(config=config))
    logger.info('available_gpus: {}'.format(available_gpus))
    assert len(args.models) == num_gpus

    # prepare detection results
    with open(args.input, 'r') as f:
        bbox_results = f.readlines()
        bbox_results = [eval(b.rstrip('\n')) for b in bbox_results]

    # prepare model predict results in multithread
    queue = Queue()
    videos = [args.video] * num_gpus
    detect = [bbox_results] * num_gpus
    thread_jobs = []
    for model_args in zip(available_gpus, videos, args.models, detect):
        model_args = list(model_args)
        model_args.append(queue)
        job = Thread(target=inference, args=model_args)
        thread_jobs.append(job)
        job.start()
    for j in thread_jobs:
        logger.info('join jobs {}'.format(j))
        j.join()
    logger.info('Complete multithread inference')
    logger.info('Current thread {}'.format(current_thread()))

    y_preds = {}
    while not queue.empty():
        model_path, y_pred = queue.get()
        y_preds[model_path] = y_pred

    # ensemble result
    logger.info(y_preds.keys())
    reference_result = list(y_preds.keys())[0]
    reference_result = y_preds[reference_result]
    savepath = Path(args.input)
    savepath = savepath.parent / '{}_ensemble.txt'.format(
        str(Path(args.video).stem))
    with open(str(savepath), 'w') as f:
        for idx in tqdm(range(len(reference_result))):
            frame_idx, bboxes = reference_result[idx]
            merge_bboxes = []

            for bbox_idx, bbox in enumerate(bboxes):
                y_pred = np.array([
                    [v[idx][-1][bbox_idx][-1][k] for v in y_preds.values()]
                    for k in bbox[-1].keys()
                ])
                y_pred = np.average(y_pred, axis=-1)
                merge_bboxes.append([
                    *bbox[:-1],
                    {k: y_pred[i] for i, k in enumerate(bbox[-1].keys())}
                ])

            f.write(str([frame_idx, merge_bboxes]))
            if idx != len(reference_result) - 1:
                f.write('\n')
    logger.info('Save final result at {}'.format(str(savepath)))
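The inference worker is referenced but not shown. From the thread setup it should score every detection with one model pinned to one GPU and push the result onto the queue; a minimal sketch with the body elided (names assumed):

def inference(gpu_name, video_path, model_path, bbox_results, queue):
    # hypothetical worker: pin this thread's ops to one GPU, score all bboxes
    # as in Example 12, then hand the result back through the queue
    with tf.device(gpu_name):
        y_pred = []
        # ... per-frame, per-bbox model.predict loop as in Example 12 ...
        queue.put((model_path, y_pred))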