def main(argv):

    parser = argparse.ArgumentParser()
    add_args.for_general(parser)
    add_args.for_inference(parser)
    add_args.for_evaluation(parser)
    add_args.for_feature(parser)
    add_args.for_lstm(parser)
    args = parser.parse_args()

    config = tf.estimator.RunConfig(save_summary_steps=float('inf'),
                                    log_step_count_steps=10)

    params = {
        'image_size': args.image_size,
        'gazemap_size': args.gazemap_size,
        'feature_map_size': args.feature_map_size,
        'model_dir': args.model_dir
    }

    model = tf.estimator.Estimator(model_fn=model_fn,
                                   model_dir=args.model_dir,
                                   config=config,
                                   params=params)

    # determine which checkpoint to restore
    if args.model_iteration is None:
        best_ckpt_dir = os.path.join(args.model_dir, 'best_ckpt')
        if os.path.isdir(best_ckpt_dir):
            ckpt_name = [
                f.split('.index')[0] for f in os.listdir(best_ckpt_dir)
                if f.endswith('.index')
            ][0]
            ckpt_path = os.path.join(best_ckpt_dir, ckpt_name)
            args.model_iteration = ckpt_name.split('-')[1]
    else:
        ckpt_name = 'model.ckpt-' + args.model_iteration
        ckpt_path = os.path.join(args.model_dir, ckpt_name)

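    # model.predict returns a lazy generator; results are computed only as the
    # loop below consumes them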
    predict_generator = model.predict(
        input_fn=lambda: input_fn('validation',
                                  batch_size=1,
                                  n_steps=None,
                                  shuffle=False,
                                  include_labels=False,
                                  n_epochs=1,
                                  args=args),
        checkpoint_path=ckpt_path)

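    # write one gaze map per predicted frame as <video_id>_<time_point>.jpg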
    output_dir = os.path.join(args.model_dir,
                              'prediction_iter_' + args.model_iteration)
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    for res in predict_generator:
        output_path = os.path.join(
            output_dir,
            str(res['video_id']) + '_' +
            str(res['predicted_time_points'][0]).zfill(5) + '.jpg')
        gazemap = np.reshape(res['ps'], args.gazemap_size)
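        # note: scipy.misc.imsave was removed in SciPy 1.2; imageio.imwrite is the usual replacement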
        misc.imsave(output_path, gazemap)
# Example 2
def main(argv):
    parser = argparse.ArgumentParser()
    add_args.for_general(parser)
    add_args.for_inference(parser)
    add_args.for_evaluation(parser)
    add_args.for_feature(parser)
    add_args.for_lstm(parser)
    args = parser.parse_args()
    
    config = tf.estimator.RunConfig(save_summary_steps=float('inf'),
                                    log_step_count_steps=10)

    params = {
        'image_size': args.image_size,
        'gazemap_size': args.gazemap_size,
        'model_dir': args.model_dir
    }
    model = tf.estimator.Estimator(
        model_fn=model_fn,
        model_dir=args.model_dir,
        config=config,
        params=params)
    
    # determine which checkpoint to restore
    if args.model_iteration is None:
        # fall back to the best checkpoint saved during training
        best_ckpt_dir = os.path.join(args.model_dir, 'best_ckpt')
        if os.path.isdir(best_ckpt_dir):
            ckpt_name = [
                f.split('.index')[0] for f in os.listdir(best_ckpt_dir)
                if f.endswith('.index')
            ][0]
            ckpt_path = os.path.join(best_ckpt_dir, ckpt_name)
            args.model_iteration = ckpt_name.split('-')[1]
            print('Restoring best checkpoint: %s' % ckpt_path)
    else:
        ckpt_name = 'model.ckpt-' + args.model_iteration
        ckpt_path = os.path.join(args.model_dir, ckpt_name)

    K.clear_session()
    predict_generator = model.predict(
        input_fn=lambda: input_fn(None,
                                  batch_size=1,
                                  n_steps=None,
                                  shuffle=False,
                                  n_epochs=1,
                                  args=args),
        checkpoint_path=ckpt_path)
    
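    # feature maps go next to the input data, one .npy file per frame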
    output_dir = os.path.join(args.data_dir, 'image_features_'+args.feature_name)
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    
    previous_video_id = None
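    # announce each new video as it starts streaming out of the generator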
    for res in predict_generator:
        if previous_video_id is None:
            print('Start inference for video: %s' % res['video_id'])
            previous_video_id = res['video_id']
        elif res['video_id'] != previous_video_id:
            print('Start inference for video: %s' % res['video_id'])
            previous_video_id = res['video_id']
            
        output_path = os.path.join(output_dir, 
            str(res['video_id'])+'_'+str(res['predicted_time_points'][0]).zfill(5)+'.npy')
        
        feature_map = res['feature_maps']
        np.save(output_path, feature_map)
import feather

import data_point_collector as dpc


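# helpers that wrap a single value into the matching tf.train.Feature proto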
def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))


def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


parser = argparse.ArgumentParser()
add_args.for_general(parser)
add_args.for_lstm(parser)
parser.add_argument('--n_divides', type=int, default=1)
parser.add_argument('--feature_name', type=str, default='alexnet')

args = parser.parse_args()

camera_folder = os.path.join(args.data_dir, 'camera_images')
tfrecord_folder = os.path.join(args.data_dir, 'tfrecords')

if not os.path.isdir(tfrecord_folder):
    os.makedirs(tfrecord_folder)

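# collect the data points grouped into sequences (longest_seq presumably caps sequence length)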
data_point_names = dpc.get_data_point_names(args.data_dir,
                                            in_sequences=True,
                                            longest_seq=args.longest_seq)
# Example 4
def main(argv):

    parser = argparse.ArgumentParser()
    add_args.for_general(parser)
    add_args.for_inference(parser)
    add_args.for_feature(parser)
    add_args.for_training(parser)
    add_args.for_lstm(parser)
    args = parser.parse_args()

    config = tf.estimator.RunConfig(save_summary_steps=float('inf'),
                                    log_step_count_steps=10)

    params = {
        'image_size': args.image_size,
        'gazemap_size': args.gazemap_size,
        'feature_map_size': args.feature_map_size,
        'model_dir': args.model_dir,
        'weight_data': args.weight_data,
        'epsilon': 1e-12,
        'learning_rate': args.learning_rate,
        'quick_summary_period': args.quick_summary_period,
        'slow_summary_period': args.slow_summary_period,
    }

    model = tf.estimator.Estimator(model_fn=model_fn,
                                   model_dir=args.model_dir,
                                   config=config,
                                   params=params)

    # set up the directory to save the best checkpoint
    best_ckpt_dir = os.path.join(args.model_dir, 'best_ckpt')
    if not os.path.isdir(best_ckpt_dir) or len(os.listdir(best_ckpt_dir)) == 0:
        smallest_loss = float('Inf')
        if not os.path.isdir(best_ckpt_dir):
            os.makedirs(best_ckpt_dir)
    else:
        smallest_loss = [
            float(f.split('_')[1]) for f in os.listdir(best_ckpt_dir)
            if f.startswith('loss_')
        ][0]

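    # alternate training and validation, snapshotting the checkpoint whenever
    # the validation metric improves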
    for _ in range(args.train_epochs // args.epochs_before_validation):
        # Train the model.
        K.clear_session()
        model.train(
            input_fn=lambda: input_fn('training',
                                      args.batch_size,
                                      args.n_steps,
                                      shuffle=True,
                                      include_labels=True,
                                      n_epochs=args.epochs_before_validation,
                                      args=args,
                                      weight_data=args.weight_data))
        # validate the model
        K.clear_session()
        valid_results = model.evaluate(
            input_fn=lambda: input_fn('validation',
                                      batch_size=1,
                                      n_steps=None,
                                      shuffle=False,
                                      include_labels=True,
                                      n_epochs=1,
                                      args=args,
                                      weight_data=False))
        print(valid_results)

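        # the negated custom_cc score is treated as a loss, so a higher correlation wins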
        if -valid_results['custom_cc'] < smallest_loss:
            smallest_loss = -valid_results['custom_cc']
            # delete best_ckpt_dir
            shutil.rmtree(best_ckpt_dir)
            # re-make best_ckpt_dir as empty
            os.makedirs(best_ckpt_dir)
            # note down the new smallest loss
            open(os.path.join(best_ckpt_dir, 'loss_%f' % smallest_loss),
                 'a').close()
            # copy the checkpoint
            files_to_copy = [
                f for f in os.listdir(args.model_dir)
                if f.startswith('model.ckpt-' +
                                str(valid_results['global_step']))
            ]
            for f in files_to_copy:
                shutil.copyfile(os.path.join(args.model_dir, f),
                                os.path.join(best_ckpt_dir, f))
def main(argv):
    parser = argparse.ArgumentParser()
    add_args.for_general(parser)
    add_args.for_inference(parser)
    add_args.for_evaluation(parser)
    add_args.for_feature(parser)
    add_args.for_lstm(parser)
    args = parser.parse_args()
    # if 'session' in locals() and session is not None:
    #     print('Close interactive session')
    #     session.close()
    config = tf.estimator.RunConfig(save_summary_steps=float('inf'),
                                    log_step_count_steps=10)

    params = {
        'image_size': args.image_size,
        'gazemap_size': args.gazemap_size,
        'model_dir': args.model_dir
    }
    model = tf.estimator.Estimator(model_fn=model_fn,
                                   model_dir=args.model_dir,
                                   config=config,
                                   params=params)

    # determine which checkpoint to restore
    if args.model_iteration is None:
        best_ckpt_dir = os.path.join(args.model_dir, 'best_ckpt')
        if os.path.isdir(best_ckpt_dir):
            ckpt_name = [
                f.split('.index')[0] for f in os.listdir(best_ckpt_dir)
                if f.endswith('.index')
            ][0]
            ckpt_path = os.path.join(best_ckpt_dir, ckpt_name)
            args.model_iteration = ckpt_name.split('-')[1]
    else:
        ckpt_name = 'model.ckpt-' + args.model_iteration
        ckpt_path = os.path.join(args.model_dir, ckpt_name)

    predict_generator = model.predict(input_fn=lambda: input_fn('inference',
                                                                batch_size=1,
                                                                n_steps=None,
                                                                shuffle=False,
                                                                n_epochs=1,
                                                                args=args),
                                      checkpoint_path=ckpt_path)

    output_dir = os.path.join(args.model_dir,
                              'prediction_iter_' + args.model_iteration)
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)

    previous_video_id = None
    for res in predict_generator:
        if previous_video_id is None:
            print('Start inference for video: %s' % res['video_id'])
            previous_video_id = res['video_id']
        elif res['video_id'] != previous_video_id:
            print('Start inference for video: %s' % res['video_id'])
            previous_video_id = res['video_id']

        output_path = os.path.join(
            output_dir,
            str(res['video_id']) + '_' +
            str(res['predicted_time_points'][0]).zfill(5) + '.jpg')
        gazemap = np.reshape(res['ps'], args.gazemap_size)
        # misc.imsave(output_path, gazemap)
        imageio.imwrite(output_path, normalize_map(gazemap).astype(np.uint8))
def main(argv):

    parser = argparse.ArgumentParser()
    add_args.for_general(parser)
    add_args.for_inference(parser)
    add_args.for_evaluation(parser)
    add_args.for_feature(parser)
    add_args.for_lstm(parser)
    args = parser.parse_args()

    config = tf.estimator.RunConfig(save_summary_steps=float('inf'))

    params = {
        'camera_size': args.camera_size,
        'small_camera_size': args.small_camera_size,
        'visual_size': args.visual_size,
        'model_dir': args.model_dir,
        'use_foveal': args.use_foveal,
        'foveal_only': args.foveal_only,
        'attention_model_dir': args.attention_model_dir,
        'weight_data': args.weight_data,
        'epsilon': 1e-12,
        'readout': args.readout,
        'output_details': True,
        'sample_fovea': args.sample_fovea,
        'attention_logit_factor': args.attention_logit_factor,
        'premade_attention_maps': args.premade_attention_maps,
        'premade_features': args.premade_features,
        'feature_map_size': args.feature_map_size,
        'gazemap_size': args.gazemap_size,
        'random_fovea': args.random_fovea,
    }

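    # restrict TensorFlow to the requested GPUs before the Estimator creates its session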
    if args.visible_gpus is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpus

    model = tf.estimator.Estimator(model_fn=model_fn,
                                   model_dir=args.model_dir,
                                   config=config,
                                   params=params)

    # determine which checkpoint to restore
    if args.model_iteration is None:
        best_ckpt_dir = os.path.join(args.model_dir, 'best_ckpt')
        if os.path.isdir(best_ckpt_dir):
            ckpt_name = [
                f.split('.index')[0] for f in os.listdir(best_ckpt_dir)
                if f.endswith('.index')
            ][0]
            ckpt_path = os.path.join(best_ckpt_dir, ckpt_name)
            args.model_iteration = ckpt_name.split('-')[1]
    else:
        ckpt_name = 'model.ckpt-' + args.model_iteration
        ckpt_path = os.path.join(args.model_dir, ckpt_name)

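    # predict over the test split with labels included, so ground-truth speed
    # can be saved alongside the model outputs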
    predict_generator = model.predict(
        input_fn=lambda: input_fn('test',
                                  batch_size=args.batch_size,
                                  n_steps=args.n_steps,
                                  shuffle=False,
                                  include_labels=True,
                                  n_epochs=1,
                                  args=args),
        checkpoint_path=ckpt_path)

    output_dir = os.path.join(args.model_dir,
                              'prediction_iter_' + args.model_iteration)
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    dfs = []
    video_ids = []
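    # one DataFrame per predicted segment; video_key indexes into video_ids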
    for res in tqdm(predict_generator):
        n_steps = len(res['predicted_time_points'])
        video_id = res['video_id'].decode("utf-8")
        if '/' in video_id:
            video_id = video_id.split('/')[-1].split('.')[0]
        df = pd.DataFrame.from_dict({
            'video_key': [len(video_ids)] * n_steps,  # keys start from 0, not 1
            'time_point': res['predicted_time_points'],
            'speed_x': res['speed'][:, 0],
            'speed_y': res['speed'][:, 1],
            'output_speed': res['output_speeds'][:, 0],
            'likelihood': res['likelihood'],
            'overlap': res['overlap'],
        })
        dfs.append(df)
        video_ids.append(video_id)

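    # outputs.feather holds the per-timestep records; videos.feather maps each
    # video_key back to its original video_id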
    output_df = pd.concat(dfs)
    feather.write_dataframe(output_df,
                            os.path.join(output_dir, 'outputs.feather'))
    video_df = pd.DataFrame(data={
        'video_key': range(len(video_ids)),
        'video_id': video_ids,
    })
    feather.write_dataframe(video_df, os.path.join(output_dir,
                                                   'videos.feather'))
def main(argv):
    parser = argparse.ArgumentParser()
    add_args.for_general(parser)
    add_args.for_inference(parser)
    add_args.for_evaluation(parser)
    add_args.for_feature(parser)
    add_args.for_lstm(parser)
    parser.add_argument('--max_length',
                        default=310,
                        type=int,
                        help="maximum length of one tfrecord")
    parser.add_argument(
        '--data_subset',
        default='test',
        type=str,
        help=
        "the tfrecords of which subset to divide, i.e., training, validation or test"
    )
    args = parser.parse_args()
    MAX_LENGTH = args.max_length

    if args.visible_gpus is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpus

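    # TF1 style: drain the input pipeline directly with a session and a one-shot iterator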
    sess = tf.Session()

    dataset = args.data_subset
    output_dir = os.path.join(args.data_dir, 'tfrecords_segments', dataset)
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    this_input_fn = lambda: input_fn(
        dataset, args.validation_batch_size, n_epochs=1, args=args)

    ds = this_input_fn()
    iterator = ds.make_one_shot_iterator()
    next_element = iterator.get_next()
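    # iterate until the single epoch is exhausted (tf.errors.OutOfRangeError)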
    while True:
        try:
            res = sess.run(next_element)

            video_path = res['video_id'].decode("utf-8")
            video_id = video_path.split('/')[-1].split('.')[0]
            #premade_feature_dir = '/data/alexnet_features/' + dataset
            #premade_features = np.load(os.path.join(premade_feature_dir, video_id+'.npy'))

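            # split the video into chunks of at most MAX_LENGTH frames,
            # one tfrecord file per chunk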
            length = len(res['cameras'])
            for i in range(math.ceil(float(length) / MAX_LENGTH)):
                startIdx = i * MAX_LENGTH
                endIdx = min((i + 1) * MAX_LENGTH, length)
                example = tf.train.Example(features=tf.train.Features(
                    feature={
                        'image/class/video_name':
                        _bytes_feature([video_path.encode('utf-8')]),
                        'image/encoded':
                        _bytes_feature(res['cameras'][startIdx:endIdx]),
                        'image/low_res':
                        _bytes_feature(res['low_res_cameras']
                                       [startIdx:endIdx]),
                        'image/speeds':
                        _float_feature(res['speed'][startIdx:endIdx].ravel().
                                       tolist()),  # ravel l*2 into list
                        'time_points':
                        _int64_feature(res['time_points']
                                       [startIdx:endIdx].tolist()),
                        #'image/premade_features': _float_feature(premade_features[startIdx:endIdx].ravel().tolist()),
                    }))

                writer = tf.python_io.TFRecordWriter(
                    os.path.join(
                        output_dir,
                        video_id + '_' + str(i).zfill(2) + '.tfrecords'))
                writer.write(example.SerializeToString())
                writer.close()

            print(video_id)

        except tf.errors.OutOfRangeError:
            break
# Example 8
def main(argv):

    parser = argparse.ArgumentParser()
    add_args.for_general(parser)
    add_args.for_inference(parser)
    add_args.for_feature(parser)
    add_args.for_training(parser)
    add_args.for_lstm(parser)
    args = parser.parse_args()

    config = tf.estimator.RunConfig(
        save_summary_steps=float('inf'),
        log_step_count_steps=args.quick_summary_period,
        keep_checkpoint_max=60)

    params = {
        'camera_size': args.camera_size,
        'small_camera_size': args.small_camera_size,
        'visual_size': args.visual_size,
        'model_dir': args.model_dir,
        'use_foveal': args.use_foveal,
        'random_fovea': args.random_fovea,
        'foveal_only': args.foveal_only,
        'attention_model_dir': args.attention_model_dir,
        'weight_data': args.weight_data,
        'epsilon': 1e-12,
        'learning_rate': args.learning_rate,
        'quick_summary_period': args.quick_summary_period,
        'slow_summary_period': args.slow_summary_period,
        'readout': args.readout,
        'augment_data': args.augment_data,
        'feature_map_size': args.feature_map_size,
        'gazemap_size': args.gazemap_size,
        'stability_loss_weight': args.stability_loss_weight,
        'sample_fovea': args.sample_fovea,
        'attention_logit_factor': args.attention_logit_factor,
        'premade_attention_maps': args.premade_attention_maps,
        'premade_features': args.premade_features,
    }

    if args.visible_gpus is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpus

    model = tf.estimator.Estimator(model_fn=model_fn,
                                   model_dir=args.model_dir,
                                   config=config,
                                   params=params)

    # set up the directory to save the best checkpoint
    best_ckpt_dir = os.path.join(args.model_dir, 'best_ckpt')
    if not os.path.isdir(best_ckpt_dir) or len(os.listdir(best_ckpt_dir)) == 0:
        smallest_loss = float('Inf')
        if not os.path.isdir(best_ckpt_dir):
            os.makedirs(best_ckpt_dir)
    else:
        smallest_loss = [
            float(f.split('_')[1]) for f in os.listdir(best_ckpt_dir)
            if f.startswith('loss_')
        ][0]

    for _ in range(args.train_epochs // args.epochs_before_validation):
        # Train the model.
        K.clear_session()
        model.train(
            input_fn=lambda: input_fn('training',
                                      args.batch_size,
                                      args.n_steps,
                                      shuffle=True,
                                      include_labels=True,
                                      n_epochs=args.epochs_before_validation,
                                      args=args,
                                      weight_data=args.weight_data,
                                      augment_data=args.augment_data))
        # validate the model
        K.clear_session()
        valid_results = model.evaluate(
            input_fn=lambda: input_fn('validation',
                                      batch_size=args.validation_batch_size,
                                      n_steps=args.validation_n_steps,
                                      shuffle=False,
                                      include_labels=True,
                                      n_epochs=1,
                                      args=args,
                                      weight_data=False))
        print(valid_results)

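        # model selection here uses validation MAE (lower is better), unlike the
        # CC-based criterion in the earlier training script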
        if valid_results['mae'] < smallest_loss:
            smallest_loss = valid_results['mae']
            # delete best_ckpt_dir
            shutil.rmtree(best_ckpt_dir)
            # re-make best_ckpt_dir as empty
            os.makedirs(best_ckpt_dir)
            # note down the new smallest loss
            open(os.path.join(best_ckpt_dir, 'loss_%f' % smallest_loss),
                 'a').close()
            # copy the checkpoint
            files_to_copy = [
                f for f in os.listdir(args.model_dir)
                if f.startswith('model.ckpt-' +
                                str(valid_results['global_step']))
            ]
            for f in files_to_copy:
                shutil.copyfile(os.path.join(args.model_dir, f),
                                os.path.join(best_ckpt_dir, f))