Example #1
def run_training( cfg ):
    # set up logging
    tf.logging.set_verbosity( tf.logging.INFO )

    with tf.Graph().as_default() as g:
        # create ops and placeholders
        inputs = utils.setup_input( cfg, is_training=False, use_filename_queue=True )
        RuntimeDeterminedEnviromentVars.load_dynamic_variables( inputs, cfg )
        RuntimeDeterminedEnviromentVars.populate_registered_variables()

        # build model (and losses and train_op)
        model = setup_model( inputs, cfg, is_training=False )

        # set up metrics to evaluate
        names_to_values, names_to_updates = setup_metrics( inputs, model, cfg )

        # execute training 
        start_time = time.time()
        utils.print_start_info( cfg, inputs[ 'max_steps' ], is_training=False )

        training_runners = { 'sess': tf.Session(), 'coord': tf.train.Coordinator() }
        data_prefetch_init_fn = utils.get_data_prefetch_threads_init_fn( inputs, cfg, is_training=False, use_filename_queue=True )
        training_runners[ 'threads' ] = data_prefetch_init_fn( training_runners[ 'sess' ], training_runners[ 'coord' ] )
        try:
            # This just returns the input as output. It is for testing data
            #  input only. 
            for step in xrange( inputs[ 'max_steps' ] ):
                input_batch, target_batch, data_idx = training_runners['sess'].run( [ 
                        model['input_batch'],  model['target_batch'], model[ 'data_idxs' ] ] )

                if training_runners['coord'].should_stop():
                    break
        finally:
            utils.request_data_loading_end( training_runners )
            utils.end_data_loading_and_sess( training_runners )
        # else: # Use tf.slim
        #     train_log_dir = os.path.join( cfg['log_dir'], 'slim-train' )

        #     # When ready to use a model, use the code below
        #     train(  model[ 'train_op' ],
        #             train_log_dir,
        #             get_data_prefetch_threads_init_fn( inputs, cfg ), 
        #             global_step=model[ 'global_step' ],
        #             number_of_steps=inputs[ 'max_steps' ],
        #             init_fn=model[ 'init_fn' ],
        #             save_summaries_secs=300,
        #             save_interval_secs=600,
        #             saver=model[ 'saver_op' ] ) 

        end_train_time = time.time() - start_time
        print('time to train %d epochs: %.3f hrs' % (cfg['num_epochs'], end_train_time/(60*60)))
        print('avg time per epoch: %.3f hrs' % ( (end_train_time/(60*60)) / cfg['num_epochs']) )
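
These run_training / run_to_task variants all consume a plain Python dict named cfg. The exact contents come from the project's generated config files, but a minimal sketch of the keys that the examples in this section actually read (key names are taken from the code; the values are illustrative assumptions only) could look like this:

# Illustrative only: key names appear in the examples, the values are made up.
cfg = {
    'log_dir': '/tmp/logs',                  # slim-train/ and checkpoints/ are created under this
    'model_path': None,                      # checkpoint to restore, or None
    'num_epochs': 30,
    'batch_size': 32,
    'randomize': True,                       # shuffling; switched off for extraction runs
    'summary_save_every_secs': 300,
    'checkpoint_save_every_secs': 600,
    'return_accuracy': False,                # when True, accuracy is tracked alongside the loss
}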
Example #2
def run_training( cfg, cfg_dir ):
    # set up logging
    tf.logging.set_verbosity( tf.logging.INFO )

    with tf.Graph().as_default() as g:
        # create ops and placeholders
        inputs = utils.setup_input_transfer( cfg, is_training=True )
        RuntimeDeterminedEnviromentVars.load_dynamic_variables( inputs, cfg )
        RuntimeDeterminedEnviromentVars.populate_registered_variables()
        
        # build model (and losses and train_op)
        model = utils.setup_model_chained_transfer( inputs, cfg, is_training=True )

        # execute training 
        start_time = time.time()
        utils.print_start_info( cfg, inputs[ 'max_steps' ], is_training=True )
        train_log_dir = os.path.join( cfg['log_dir'], 'slim-train' )
        permanent_checkpoint_dir = os.path.join( cfg['log_dir'], 'checkpoints' )

        session_config = tf.ConfigProto()
        session_config.gpu_options.allow_growth = True
        # When ready to use a model, use the code below
        train(  model[ 'train_op' ],
                train_log_dir,
                utils.get_data_prefetch_threads_init_fn_transfer( inputs, cfg, is_training=True ),
                train_step_fn=model[ 'train_step_fn' ], 
                train_step_kwargs=model[ 'train_step_kwargs' ], 
                global_step=model[ 'global_step' ],
                number_of_steps=inputs[ 'max_steps' ],
                number_of_epochs=cfg['num_epochs'],
                init_fn=model[ 'init_fn' ],
                save_checkpoint_every=inputs['max_steps'] // (cfg['num_epochs'] * 2),
                cfg_dir=cfg_dir,
                #RuntimeDeterminedEnviromentVars.steps_per_epoch,
                permanent_checkpoint_dir=permanent_checkpoint_dir,
                save_summaries_secs=cfg['summary_save_every_secs'],
                save_interval_secs=cfg['checkpoint_save_every_secs'],
                saver=model[ 'saver_op' ], 
                return_accuracy='return_accuracy' in cfg and cfg['return_accuracy'],
                session_config=session_config )

        end_train_time = time.time() - start_time
        print('time to train %d epochs: %.3f hrs' % (cfg['num_epochs'], end_train_time/(60*60)))
        print('avg time per epoch: %.3f hrs' % ( (end_train_time/(60*60)) / cfg['num_epochs']) )
Example #3
def run_training(cfg):
    # set up logging
    tf.logging.set_verbosity(tf.logging.INFO)

    with tf.Graph().as_default() as g:
        # create ops and placeholders
        inputs = utils.setup_input(cfg,
                                   is_training=False,
                                   use_filename_queue=True)
        RuntimeDeterminedEnviromentVars.load_dynamic_variables(inputs, cfg)
        RuntimeDeterminedEnviromentVars.populate_registered_variables()

        # build model (and losses and train_op)
        model = setup_model(inputs, cfg, is_training=False)

        # set up metrics to evaluate
        names_to_values, names_to_updates = setup_metrics(inputs, model, cfg)

        # execute training
        start_time = time.time()
        utils.print_start_info(cfg, inputs['max_steps'], is_training=False)

        # start session and restore model
        training_runners = {
            'sess': tf.Session(),
            'coord': tf.train.Coordinator()
        }
        if cfg['model_path'] is None:
            print('Please specify a checkpoint directory')
            return
        cfg['randomize'] = False
        model['saver_op'].restore(training_runners['sess'], cfg['model_path'])

        utils.print_start_info(cfg,
                               inputs['max_steps'],
                               is_training=False)

        data_prefetch_init_fn = utils.get_data_prefetch_threads_init_fn(
            inputs, cfg, is_training=False, use_filename_queue=True)
        training_runners['threads'] = data_prefetch_init_fn(
            training_runners['sess'], training_runners['coord'])

        representations, input_batch, target_batch, data_idx, mask_batch = training_runners[
            'sess'].run([
                model['model'].encoder_output, inputs['input_batch'],
                inputs['target_batch'], inputs['data_idxs'],
                inputs['mask_batch']
            ])

        print('Got first batch representation with size:%s' %
              (representations.shape))
        for step in xrange(inputs['max_steps'] - 1):
            encoder_output, input_batch, target_batch, data_idx, mask_batch = training_runners[
                'sess'].run([
                    model['model'].encoder_output, inputs['input_batch'],
                    inputs['target_batch'], inputs['data_idxs'],
                    inputs['mask_batch']
                ])
            representations = np.append(representations,
                                        encoder_output,
                                        axis=0)

            if training_runners['coord'].should_stop():
                break

        print(
            'The size of representations is %s while we expect it to run for %d steps with batchsize %d'
            % (representations.shape, inputs['max_steps'], cfg['batch_size']))

        utils.request_data_loading_end(training_runners)
        utils.end_data_loading_and_sess(training_runners)

        end_train_time = time.time() - start_time
        print('time to train %d epochs: %.3f hrs' %
              (cfg['num_epochs'], end_train_time / (60 * 60)))
        print('avg time per epoch: %.3f hrs' %
              ((end_train_time / (60 * 60)) / cfg['num_epochs']))
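
One caveat with Example #3: np.append copies the entire representations array on every step, so the accumulation is quadratic in the number of batches. A small self-contained sketch of the same idea done with a list and a single concatenation at the end (run_step here is just a stand-in for training_runners['sess'].run(model['model'].encoder_output)):

import numpy as np

def collect_representations(run_step, max_steps):
    # run_step() stands in for one sess.run(...) call returning a batch of encoder outputs.
    batches = [run_step()]                     # first batch, as in the example
    for _ in range(max_steps - 1):             # remaining steps
        batches.append(run_step())
    return np.concatenate(batches, axis=0)     # one copy at the end instead of one per step

# Dummy usage: ten batches of shape (4, 16) give a (40, 16) array.
reps = collect_representations(lambda: np.zeros((4, 16)), max_steps=10)
assert reps.shape == (40, 16)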
Example #4
def run_training(cfg, cfg_dir):
    # set up logging
    tf.logging.set_verbosity(tf.logging.INFO)

    with tf.Graph().as_default() as g:
        # create ops and placeholders
        inputs = utils.setup_input_transfer(cfg, is_training=True)
        RuntimeDeterminedEnviromentVars.load_dynamic_variables(inputs, cfg)
        RuntimeDeterminedEnviromentVars.populate_registered_variables()
        # build model (and losses and train_op)
        model = utils.setup_model(inputs, cfg, is_training=True)
        # execute training
        start_time = time.time()
        utils.print_start_info(cfg, inputs['max_steps'], is_training=True)
        if cfg['model_type'] == 'empty':  # Can't use tf.slim because there are no trainable variables
            training_runners = {
                'sess': tf.Session(),
                'coord': tf.train.Coordinator()
            }
            data_prefetch_init_fn = utils.get_data_prefetch_threads_init_fn(
                inputs, cfg, is_training=True)
            training_runners['threads'] = data_prefetch_init_fn(
                training_runners['sess'], training_runners['coord'])
            try:
                # This just returns the input as output. It is for testing data
                #  input only.
                for step in xrange(inputs['max_steps']):
                    input_batch, target_batch, data_idx = training_runners[
                        'sess'].run([
                            model['input_batch'], model['target_batch'],
                            model['data_idxs']
                        ])

                    if training_runners['coord'].should_stop():
                        break
            finally:
                utils.request_data_loading_end(training_runners)
                utils.end_data_loading_and_sess(training_runners)
        else:  # Use tf.slim
            train_log_dir = os.path.join(cfg['log_dir'], 'slim-train')
            permanent_checkpoint_dir = os.path.join(cfg['log_dir'],
                                                    'checkpoints')

            session_config = tf.ConfigProto()
            session_config.gpu_options.allow_growth = True
            #max_to_keep = cfg['num_epochs'] * 2
            max_to_keep = 10
            if 'max_ckpts_to_keep' in cfg:
                max_to_keep = cfg['max_ckpts_to_keep']
            # When ready to use a model, use the code below
            train(
                model['train_op'],
                train_log_dir,
                utils.get_data_prefetch_threads_init_fn_transfer(
                    inputs, cfg, is_training=True),
                train_step_fn=model['train_step_fn'],
                train_step_kwargs=model['train_step_kwargs'],
                global_step=model['global_step'],
                number_of_steps=inputs['max_steps'],
                number_of_epochs=cfg['num_epochs'],
                init_fn=model['init_fn'],
                save_checkpoint_every=max(inputs['max_steps'] // (max_to_keep),
                                          500),
                cfg_dir=cfg_dir,
                #RuntimeDeterminedEnviromentVars.steps_per_epoch,
                permanent_checkpoint_dir=permanent_checkpoint_dir,
                save_summaries_secs=cfg['summary_save_every_secs'],
                save_interval_secs=cfg['checkpoint_save_every_secs'],
                saver=model['saver_op'],
                return_accuracy='return_accuracy' in cfg
                and cfg['return_accuracy'],
                session_config=session_config)

        end_train_time = time.time() - start_time
        print('time to train %d epochs: %.3f hrs' %
              (cfg['num_epochs'], end_train_time / (60 * 60)))
        print('avg time per epoch: %.3f hrs' %
              ((end_train_time / (60 * 60)) / cfg['num_epochs']))
Example #5
def run_training(cfg, cfg_dir, args):
    if args.stat_type == "mean":
        statistic = MeanMeter(cfg)
    elif args.stat_type == 'median':
        statistic = MedianMeter(cfg)
    elif args.stat_type == 'marginal':
        statistic = DiscreteDistributionMeter(cfg, args.not_one_hot)
    elif args.stat_type == 'dense_marginal':
        statistic = DenseDiscreteDistributionMeter(cfg)
    elif args.stat_type == 'moments':
        statistic = MomentsMeter(cfg)
    else:
        raise NotImplementedError("No average defined for type: {}".format(
            args.stat_type))

    # set up logging
    tf.logging.set_verbosity(tf.logging.ERROR)

    with tf.Graph().as_default() as g:
        # create ops and placeholders
        inputs = utils.setup_input(cfg, is_training=False)
        RuntimeDeterminedEnviromentVars.load_dynamic_variables(inputs, cfg)
        RuntimeDeterminedEnviromentVars.populate_registered_variables()

        # execute training
        start_time = time.time()
        max_steps = get_max_steps(inputs['max_steps'], args.data_split)
        utils.print_start_info(cfg, max_steps, is_training=False)
        data_prefetch_threads_init_fn = utils.get_data_prefetch_threads_init_fn(
            inputs, cfg, is_training=False)
        training_runners = {
            'sess': tf.Session(),
            'coord': tf.train.Coordinator()
        }

        prefetch_threads = threading.Thread(
            target=data_prefetch_threads_init_fn,
            args=(training_runners['sess'], training_runners['coord']))
        prefetch_threads.start()

        target_batch = training_runners['sess'].run(inputs['target_batch'])
        # training_runners[ 'threads' ] = data_prefetch_init_fn( training_runners[ 'sess' ], training_runners[ 'coord' ] )
        try:
            # This just returns the input as output. It is for testing data
            #  input only.
            start_time = time.time()
            batch_time = time.time()
            k = int(args.print_every)
            for step in range(max_steps):
                target_batch, mask_batch = training_runners['sess'].run(
                    [inputs['target_batch'], inputs['mask_batch']])
                target_batch = map_to_img(target_batch.mean(axis=0), cfg)
                if len(mask_batch.shape) > 1:
                    mask_batch = mask_batch.mean(axis=0)
                else:
                    mask_batch = 1

                statistic.update(target_batch, mask_batch)
                if (step + 1) % k == 0:
                    print('Step %d/%d: %.2f s/step ' %
                          (step + 1, max_steps,
                           (time.time() - batch_time) / k))
                    batch_time = time.time()
                    # print(statistic.get())
                    # break
                if training_runners['coord'].should_stop():
                    break

            end_train_time = time.time() - start_time
            print('time to train %d epochs: %.3f hrs' %
                  (cfg['num_epochs'], end_train_time / (60 * 60)))
            print('avg time per epoch: %.3f hrs' %
                  ((end_train_time / (60 * 60)) / cfg['num_epochs']))
            if args.stat_type == 'moments':
                save_moments(statistic, cfg, args)
            else:
                save_data(statistic, cfg, args)
        finally:
            utils.request_data_loading_end(training_runners)
            utils.end_data_loading_and_sess(training_runners)
Example #6
def run_val_test(cfg):
    # set up logging
    tf.logging.set_verbosity(tf.logging.INFO)

    tf.reset_default_graph()
    training_runners = {
        'sess': tf.InteractiveSession(),
        'coord': tf.train.Coordinator()
    }
    # create ops and placeholders
    inputs = utils.setup_input(cfg, is_training=False)
    RuntimeDeterminedEnviromentVars.load_dynamic_variables(inputs, cfg)
    RuntimeDeterminedEnviromentVars.populate_registered_variables()

    # build model (and losses and train_op)
    model = utils.setup_model(inputs, cfg, is_training=False)
    #        full_path = tf.train.latest_checkpoint(checkpoint_dir)
    #        step = full_path.split('-')[-1]

    #    model_path = os.path.join('/home/ubuntu/s3/model_log', cfg['task_name'], 'model.permanent-ckpt')

    model_path = os.path.join('/home/ubuntu/s3/model_log_final',
                              cfg['task_name'], 'model.permanent-ckpt')
    model['saver_op'].restore(training_runners['sess'], model_path)
    m = model['model']
    # execute training
    start_time = time.time()
    utils.print_start_info(cfg, inputs['max_steps'], is_training=False)

    data_prefetch_init_fn = utils.get_data_prefetch_threads_init_fn(
        inputs, cfg, is_training=False, use_filename_queue=False)

    prefetch_threads = threading.Thread(target=data_prefetch_init_fn,
                                        args=(training_runners['sess'],
                                              training_runners['coord']))
    prefetch_threads.start()

    print("Dataloading workers dispatched....")

    return_accuracy = 'return_accuracy' in cfg and cfg['return_accuracy']

    losses_mean = AverageMeter()
    accuracy_mean = AverageMeter()
    for step in range(inputs['max_steps']):
        #print(step)
        if return_accuracy:
            (data_idx, loss, accuracy) = training_runners['sess'].run(
                [model['data_idxs'], m.losses[0], m.accuracy])
            losses_mean.update(loss)
            accuracy_mean.update(accuracy)
            if step % 100 == 0:
                print(
                    'Step: {step} with Current Losses mean: {loss}; with accuracy: {accur}'
                    .format(step=step,
                            loss=losses_mean.avg,
                            accur=accuracy_mean.avg))
        else:
            (data_idx, loss) = training_runners['sess'].run(
                [model['data_idxs'], m.losses[0]])
            losses_mean.update(loss)
            if step % 100 == 0:
                print('Step: {step} with Current Losses mean: {loss}'.format(
                    step=step, loss=losses_mean.avg))
    if return_accuracy:
        print('Final Losses mean: {loss}; with accuracy: {accur}'.format(
            loss=losses_mean.avg, accur=accuracy_mean.avg))
    else:
        print('Final Losses mean: {loss}'.format(loss=losses_mean.avg))

    end_train_time = time.time() - start_time
    print('time to train %d epochs: %.3f hrs' %
          (cfg['num_epochs'], end_train_time / (60 * 60)))
    print('avg time per epoch: %.3f hrs' % ((end_train_time /
                                             (60 * 60)) / cfg['num_epochs']))
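
Example #6 relies on an AverageMeter class defined elsewhere in the codebase; from its usage (update(loss) and .avg) it is a running scalar average. A minimal sketch consistent with that interface (an assumption, not the project's actual implementation):

class AverageMeter(object):
    # Running average of scalar values, matching the update()/.avg usage in Example #6.
    def __init__(self):
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, value, n=1):
        self.sum += value * n
        self.count += n
        self.avg = self.sum / self.count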
Example #7
def run_to_task():
    import general_utils
    from   general_utils import RuntimeDeterminedEnviromentVars
    args = parser.parse_args()
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    tf.logging.set_verbosity(tf.logging.ERROR)
    image_list = glob.glob(args.img_dir +"/*.png")
    image_list.sort()
    #image_list=image_list[:5]
    print(len(image_list))
    for task in tqdm(list_of_tasks):
        print("Task is ", task)
        if task not in list_of_tasks:
            raise ValueError('Task not supported')
        low_sat_tasks = 'autoencoder curvature denoise edge2d edge3d \
        keypoint2d keypoint3d \
        reshade rgb2depth rgb2mist rgb2sfnorm \
        segment25d segment2d room_layout'.split()
        cfg = generate_cfg(task)
        if task in low_sat_tasks:
            cfg['input_preprocessing_fn'] = load_ops.resize_rescale_image_low_sat
        print("Doing {task}".format(task=task))
        general_utils = importlib.reload(general_utils)
        tf.reset_default_graph()
        training_runners = { 'sess': tf.InteractiveSession(), 'coord': tf.train.Coordinator() }

        ############## Set Up Inputs ##############
        # tf.logging.set_verbosity( tf.logging.INFO )
        setup_input_fn = utils.setup_input
        inputs = setup_input_fn( cfg, is_training=False, use_filename_queue=False )
        RuntimeDeterminedEnviromentVars.load_dynamic_variables( inputs, cfg )
        RuntimeDeterminedEnviromentVars.populate_registered_variables()
        start_time = time.time()

        ############## Set Up Model ##############
        model = utils.setup_model( inputs, cfg, is_training=False )
        m = model[ 'model' ]
        model[ 'saver_op' ].restore( training_runners[ 'sess' ], cfg[ 'model_path' ] )
        #Prints all the class variables and functions
        print(dir(m))
        for img_name in tqdm(image_list):
            filename = img_name.split('/')[-1].split('.')[0]

            img = load_raw_image_center_crop( img_name )
            img = skimage.img_as_float(img)
            scipy.misc.toimage(np.squeeze(img), cmin=0.0, cmax=1.0).save(img_name)

            # Since we observe that areas with pixel values close to either 0 or 1 sometimes overflow, we clip pixel values



            if task == 'jigsaw' :
                img = cfg[ 'input_preprocessing_fn' ]( img, target=cfg['target_dict'][random.randint(0,99)],
                                                        **cfg['input_preprocessing_fn_kwargs'] )
            else:
                img = cfg[ 'input_preprocessing_fn' ]( img, **cfg['input_preprocessing_fn_kwargs'] )

            img = img[np.newaxis,:]




            if task in fc_task_list:
                predicted, representation, decoder_features, encoder_features = training_runners['sess'].run(
                        [ m.decoder_output,  m.encoder_output, m.metric_endpoints, m.encoder_endpoints ], feed_dict={m.input_images: img} )
            else:
                predicted, representation, decoder_features, encoder_features = training_runners['sess'].run(
                        [ m.decoder_output,  m.encoder_output, m.decoder_endpoints, m.encoder_endpoints ], feed_dict={m.input_images: img} )
            #np.save(save_path,value)
            #for name,value in decoder_features.items():
            #    print (name)
            ## CKD : Uncomment below for loop
            for name,value in encoder_features.items():
                if name in encoder_save_list or name in feedforward_encoder_save_list:
                    #print (name)
                    name = name.replace('/', '_')
                    save_path = os.path.join(args.save_dir,filename+"_"+task + "_" + name + ".npy")
                    np.save(save_path,value)
            if args.store_rep:
                s_name, file_extension = os.path.splitext(args.store_name)
                with open('{}.npy'.format(s_name), 'wb') as fp:
                    np.save(fp, np.squeeze(representation))

            if args.store_pred:
                save_path = os.path.join(args.save_dir,filename+"_"+task + "_" + "prediction" + ".npy")
                with open(save_path, 'wb') as fp:
                    np.save(fp, np.squeeze(predicted))
            #if task == 'segment2d' or task == 'segment25d':
            #    segmentation_pca(predicted, args.store_name)
                #return
            #if task == 'colorization':
            #    single_img_colorize(predicted, img , args.store_name)
                #return

            #if task == 'curvature':
            #    curvature_single_image(predicted, args.store_name)
                #return

            #just_rescale = ['autoencoder', 'denoise', 'edge2d',
            #                'edge3d', 'keypoint2d', 'keypoint3d',
            #                'reshade', 'rgb2sfnorm' ]

            #if task in just_rescale:
            #    print(args.store_name)
                #simple_rescale_img(predicted, args.store_name)
                #return

            #just_clip = ['rgb2depth', 'rgb2mist']
            #if task in just_clip:
            #    depth_single_image(predicted, args.store_name)
            #    #return

            #if task == 'inpainting_whole':
            #    inpainting_bbox(predicted, args.store_name)
                #return

            #if task == 'segmentsemantic':
            #    semseg_single_image( predicted, img, args.store_name)
                #return

            #if task in ['class_1000', 'class_places']:
             #   print("The shape of predicted is ---------------", predicted.shape)
                #classification(predicted, synset, args.store_name)
                #return

            #if task == 'vanishing_point':
            #    _ = plot_vanishing_point_smoothed(np.squeeze(predicted), (np.squeeze(img) + 1. )/2., args.store_name, [])
                #return

            #if task == 'room_layout':
            #    mean = np.array([0.006072743318127848, 0.010272365569691076, -3.135909774145468,
            #                    1.5603802322235532, 5.6228218371102496e-05, -1.5669352793761442,
            #                                5.622875878174759, 4.082800262277375, 2.7713941642895956])
            #    std = np.array([0.8669452525283652, 0.687915294956501, 2.080513632043758,
            #                    0.19627420479282623, 0.014680602791251812, 0.4183827359302299,
            #                                3.991778013006544, 2.703495278378409, 1.2269185938626304])
            #    predicted = predicted * std + mean
            #    plot_room_layout(np.squeeze(predicted), (np.squeeze(img) + 1. )/2., args.store_name, [], cube_only=True)
                #return

            #if task == 'jigsaw':
            #    predicted = np.argmax(predicted, axis=1)
            #    perm = cfg[ 'target_dict' ][ predicted[0] ]
            #    show_jigsaw((np.squeeze(img) + 1. )/2., perm, args.store_name)
                #return

            ############## Clean Up ##############
        training_runners[ 'coord' ].request_stop()
        training_runners[ 'coord' ].join()
        print("Done: {}".format(task))

        ############## Reset graph and paths ##############
        tf.reset_default_graph()
        training_runners['sess'].close()
    return
Example #8
def run_to_task(args):
    import general_utils
    from general_utils import RuntimeDeterminedEnviromentVars

    tf.logging.set_verbosity(tf.logging.ERROR)

    img = load_raw_image_center_crop(args.im_name)
    img = skimage.img_as_float(img)
    scipy.misc.toimage(np.squeeze(img), cmin=0.0, cmax=1.0).save(args.im_name)

    task = args.task
    if task not in list_of_tasks:
        raise ValueError('Task not supported')

    cfg = generate_cfg(task)

    # Since we observe that areas with pixel values close to either 0 or 1 sometimes overflow, we clip pixel values
    low_sat_tasks = 'autoencoder curvature denoise edge2d edge3d \
    keypoint2d keypoint3d \
    reshade rgb2depth rgb2mist rgb2sfnorm \
    segment25d segment2d room_layout'.split()
    if task in low_sat_tasks:
        cfg['input_preprocessing_fn'] = load_ops.resize_rescale_image_low_sat

    if task == 'jigsaw':
        img = cfg['input_preprocessing_fn'](
            img,
            target=cfg['target_dict'][random.randint(0, 99)],
            **cfg['input_preprocessing_fn_kwargs'])
    else:
        img = cfg['input_preprocessing_fn'](
            img, **cfg['input_preprocessing_fn_kwargs'])

    img = img[np.newaxis, :]

    if task == 'class_places' or task == 'class_1000':
        synset = get_synset(task)

    print("Doing {task}".format(task=task))
    general_utils = importlib.reload(general_utils)
    tf.reset_default_graph()
    training_runners = {
        'sess': tf.InteractiveSession(),
        'coord': tf.train.Coordinator()
    }

    ############## Set Up Inputs ##############
    # tf.logging.set_verbosity( tf.logging.INFO )
    setup_input_fn = utils.setup_input
    inputs = setup_input_fn(cfg, is_training=False, use_filename_queue=False)
    RuntimeDeterminedEnviromentVars.load_dynamic_variables(inputs, cfg)
    RuntimeDeterminedEnviromentVars.populate_registered_variables()
    start_time = time.time()

    ############## Set Up Model ##############
    model = utils.setup_model(inputs, cfg, is_training=False)
    m = model['model']
    model['saver_op'].restore(training_runners['sess'], cfg['model_path'])

    predicted, representation = training_runners['sess'].run(
        [m.decoder_output, m.encoder_endpoints[args.encoder_layer]],
        feed_dict={m.input_images: img})

    if args.store_rep:
        s_name, file_extension = os.path.splitext(args.store_name)

        if args.compress_rep:
            representation = tf.keras.layers.AveragePooling2D(
                pool_size=(2, 2))(tf.constant(representation))
            representation = training_runners['sess'].run(representation)

        with open('{}.npy'.format(s_name), 'wb') as fp:
            np.save(fp, np.squeeze(representation))

    if args.store_pred:
        s_name, file_extension = os.path.splitext(args.store_name)
        with open('{}_pred.npy'.format(s_name), 'wb') as fp:
            np.save(fp, np.squeeze(predicted))

    if task == 'segment2d' or task == 'segment25d':
        segmentation_pca(predicted, args.store_name)
        return
    if task == 'colorization':
        single_img_colorize(predicted, img, args.store_name)
        return

    if task == 'curvature':
        curvature_single_image(predicted, args.store_name)
        return

    just_rescale = [
        'autoencoder', 'denoise', 'edge2d', 'edge3d', 'keypoint2d',
        'keypoint3d', 'reshade', 'rgb2sfnorm'
    ]

    if task in just_rescale:
        simple_rescale_img(predicted, args.store_name)
        return

    just_clip = ['rgb2depth', 'rgb2mist']
    if task in just_clip:
        depth_single_image(predicted, args.store_name)
        return

    if task == 'inpainting_whole':
        inpainting_bbox(predicted, args.store_name)
        return

    if task == 'segmentsemantic':
        semseg_single_image(predicted, img, args.store_name)
        return

    if task in ['class_1000', 'class_places']:
        classification(predicted, synset, args.store_name)
        return

    if task == 'vanishing_point':
        _ = plot_vanishing_point_smoothed(np.squeeze(predicted),
                                          (np.squeeze(img) + 1.) / 2.,
                                          args.store_name, [])
        return

    if task == 'room_layout':
        mean = np.array([
            0.006072743318127848, 0.010272365569691076, -3.135909774145468,
            1.5603802322235532, 5.6228218371102496e-05, -1.5669352793761442,
            5.622875878174759, 4.082800262277375, 2.7713941642895956
        ])
        std = np.array([
            0.8669452525283652, 0.687915294956501, 2.080513632043758,
            0.19627420479282623, 0.014680602791251812, 0.4183827359302299,
            3.991778013006544, 2.703495278378409, 1.2269185938626304
        ])
        predicted = predicted * std + mean
        plot_room_layout(np.squeeze(predicted), (np.squeeze(img) + 1.) / 2.,
                         args.store_name, [],
                         cube_only=True)
        return

    if task == 'jigsaw':
        predicted = np.argmax(predicted, axis=1)
        perm = cfg['target_dict'][predicted[0]]
        show_jigsaw((np.squeeze(img) + 1.) / 2., perm, args.store_name)
        return

    ############## Clean Up ##############
    training_runners['coord'].request_stop()
    training_runners['coord'].join()
    print("Done: {}".format(config_name))

    ############## Reset graph and paths ##############
    tf.reset_default_graph()
    training_runners['sess'].close()
    return
Example #9 (fragment)
                general_utils = importlib.reload(general_utils)
                tf.reset_default_graph()
                training_runners = {
                    'sess': tf.InteractiveSession(),
                    'coord': tf.train.Coordinator()
                }

                ############## Set Up Inputs ##############
                # tf.logging.set_verbosity( tf.logging.INFO )
                setup_input_fn = utils.setup_input
                inputs = setup_input_fn(cfg,
                                        is_training=False,
                                        use_filename_queue=False)
                RuntimeDeterminedEnviromentVars.load_dynamic_variables(
                    inputs, cfg)
                RuntimeDeterminedEnviromentVars.populate_registered_variables()
                start_time = time.time()

                ############## Set Up Model ##############
                model = utils.setup_model(inputs, cfg, is_training=False)
                m = model['model']
                model['saver_op'].restore(training_runners['sess'],
                                          cfg['model_path'])
                '''
                # encoder (extract features)
                predicted, representation = training_runners['sess'].run( 
                        [ m.decoder_output,  m.encoder_output ], feed_dict={m.input_images: img} )
                
                '''
                representation = training_runners['sess'].run(
                    m.encoder_output, feed_dict={m.input_images: img})
Example #10
def run_to_task():
    import general_utils
    from general_utils import RuntimeDeterminedEnviromentVars

    tf.logging.set_verbosity(tf.logging.ERROR)

    args = parser.parse_args()

    task = args.task
    if task not in list_of_tasks:
        raise ValueError('Task not supported')

    cfg = utils.generate_cfg(task)

    # Since we observe that areas with pixel values close to either 0 or 1 sometimes overflow, we clip pixel values
    low_sat_tasks = 'autoencoder curvature denoise edge2d edge3d \
    keypoint2d keypoint3d \
    reshade rgb2depth rgb2mist rgb2sfnorm \
    segment25d segment2d room_layout'.split()
    if task in low_sat_tasks:
        cfg['input_preprocessing_fn'] = load_ops.resize_rescale_image_low_sat

    print("Doing {task}".format(task=task))
    general_utils = importlib.reload(general_utils)
    tf.reset_default_graph()
    training_runners = {
        'sess': tf.InteractiveSession(),
        'coord': tf.train.Coordinator()
    }

    ############## Set Up Inputs ##############
    # tf.logging.set_verbosity( tf.logging.INFO )
    setup_input_fn = utils.setup_input
    inputs = setup_input_fn(cfg, is_training=False, use_filename_queue=False)
    RuntimeDeterminedEnviromentVars.load_dynamic_variables(inputs, cfg)
    RuntimeDeterminedEnviromentVars.populate_registered_variables()
    start_time = time.time()

    ############## Set Up Model ##############
    model = utils.setup_model(inputs, cfg, is_training=False)
    m = model['model']
    model['saver_op'].restore(training_runners['sess'], cfg['model_path'])

    ############## Single Image ##############

    if args.imgs_list:
        with open(args.imgs_list) as imgs_list:
            all_prediction = []
            all_representation = []

            for line in imgs_list:
                filename = args.dir_name + line.strip().split(',')[0]  # FIXME
                img = prepare_image(task, filename, cfg)
                predicted, representation = training_runners['sess'].run(
                    [m.decoder_output, m.encoder_output],
                    feed_dict={m.input_images: img})

                utils.tasks(
                    task,
                    args,
                    predicted,
                    os.path.join(args.store_name +
                                 line.split(os.path.sep)[-1].strip() + '.jpg'),
                    img=img)
                all_prediction.append(np.squeeze(predicted))
                all_representation.append(np.squeeze(representation))

            if args.store_rep:
                s_name, file_extension = os.path.splitext(args.store_name)
                with open('{}.npy'.format(s_name), 'wb') as fp:
                    np.save(fp, np.array(all_representation))

            if args.store_pred:
                s_name, file_extension = os.path.splitext(args.store_name)
                with open('{}_pred.npy'.format(s_name), 'wb') as fp:
                    np.save(fp, np.array(all_prediction))
    else:
        img = prepare_image(task, args.im_name, cfg)

        predicted, representation = training_runners['sess'].run(
            [m.decoder_output, m.encoder_output],
            feed_dict={m.input_images: img})

        utils.tasks(task, args, predicted, representation, img)

        if args.store_rep:
            s_name, file_extension = os.path.splitext(args.store_name)
            with open('{}.npy'.format(s_name), 'wb') as fp:
                np.save(fp, np.squeeze(representation))

        if args.store_pred:
            s_name, file_extension = os.path.splitext(args.store_name)
            with open('{}_pred.npy'.format(s_name), 'wb') as fp:
                np.save(fp, np.squeeze(predicted))

    ############## Clean Up ##############
    training_runners['coord'].request_stop()
    training_runners['coord'].join()
    # print("Done: {}".format(config_name))

    ############## Reset graph and paths ##############
    tf.reset_default_graph()
    training_runners['sess'].close()
    return
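
prepare_image in Example #10 is not shown here. Judging from the inline preprocessing in Example #8, it presumably loads one image, converts it to float, applies the task's preprocessing function, and adds a batch dimension. A hedged sketch of such a helper (name and behavior inferred from Example #8, not taken from the project; it assumes the same module-level imports as the examples: numpy, skimage, random, load_raw_image_center_crop):

def prepare_image(task, filename, cfg):
    # Sketch: mirrors the single-image preprocessing shown inline in Example #8.
    img = load_raw_image_center_crop(filename)
    img = skimage.img_as_float(img)
    if task == 'jigsaw':
        img = cfg['input_preprocessing_fn'](img,
                                            target=cfg['target_dict'][random.randint(0, 99)],
                                            **cfg['input_preprocessing_fn_kwargs'])
    else:
        img = cfg['input_preprocessing_fn'](img, **cfg['input_preprocessing_fn_kwargs'])
    return img[np.newaxis, :]  # batch dimension expected by m.input_images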
Example #11
def run_to_task():
    import general_utils
    from   general_utils import RuntimeDeterminedEnviromentVars

    tf.logging.set_verbosity(tf.logging.ERROR)
   
    args = parser.parse_args()

    imgs = args.im_name.split(',')

    if args.task == 'ego_motion' and len(imgs) != 3:
        raise ValueError('Wrong number of images, expecting 3 but got {}'.format(len(imgs)))
    if args.task != 'ego_motion' and len(imgs) != 2:
        raise ValueError('Wrong number of images, expecting 2 but got {}'.format(len(imgs)))

    
    task = args.task
    if task not in list_of_tasks:
        raise ValueError('Task not supported')

    cfg = generate_cfg(task)

    input_img = np.empty((len(imgs),256,256,3), dtype=np.float32)
    for i,imname in enumerate(imgs):
        img = load_raw_image_center_crop( imname )
        img = skimage.img_as_float(img)
        scipy.misc.toimage(np.squeeze(img), cmin=0.0, cmax=1.0).save(imname)
        img = cfg[ 'input_preprocessing_fn' ]( img, **cfg['input_preprocessing_fn_kwargs'] )
        input_img[i,:,:,:] = img
    input_img = input_img[np.newaxis, :]
    

    print("Doing {task}".format(task=task))
    general_utils = importlib.reload(general_utils)
    tf.reset_default_graph()
    training_runners = { 'sess': tf.InteractiveSession(), 'coord': tf.train.Coordinator() }

    ############## Set Up Inputs ##############
    # tf.logging.set_verbosity( tf.logging.INFO )
    setup_input_fn = utils.setup_input
    inputs = setup_input_fn( cfg, is_training=False, use_filename_queue=False )
    RuntimeDeterminedEnviromentVars.load_dynamic_variables( inputs, cfg )
    RuntimeDeterminedEnviromentVars.populate_registered_variables()
    start_time = time.time()

    ############## Set Up Model ##############
    model = utils.setup_model( inputs, cfg, is_training=False )
    m = model[ 'model' ]
    model[ 'saver_op' ].restore( training_runners[ 'sess' ], cfg[ 'model_path' ] )

    predicted, representation = training_runners['sess'].run( 
            [ m.decoder_output,  m.encoder_output ], feed_dict={m.input_images: input_img} )

    if args.store_rep:
        s_name, file_extension = os.path.splitext(args.store_name)
        with open('{}.npy'.format(s_name), 'wb') as fp:
            np.save(fp, np.squeeze(representation))

    if args.store_pred:
        s_name, file_extension = os.path.splitext(args.store_name)
        with open('{}_pred.npy'.format(s_name), 'wb') as fp:
            np.save(fp, np.squeeze(predicted))

    if task == 'ego_motion':
        ego_motion(predicted, args.store_name)
        return
    if task == 'fix_pose':
        cam_pose(predicted, args.store_name, is_fixated=True)
        return   
    if task == 'non_fixated_pose':
        cam_pose(predicted, args.store_name, is_fixated=False)
        return
    if task == 'point_match':
        prediction = np.argmax(predicted, axis=1)
        print('the prediction (1 stands for match, 0 for no match) is: ', prediction)
        return       
    ############## Clean Up ##############
    training_runners[ 'coord' ].request_stop()
    training_runners[ 'coord' ].join()
    print("Done: {}".format(config_name))

    ############## Reset graph and paths ##############            
    tf.reset_default_graph()
    training_runners['sess'].close()
    return
Example #12
def run_extract_losses(avg_img, args, cfg, save_dir, given_task):
    transfer = (cfg['model_type'] == architectures.TransferNet)
    if transfer:
        get_data_prefetch_threads_init_fn = utils.get_data_prefetch_threads_init_fn_transfer
        setup_input_fn = utils.setup_input_transfer
    else:
        setup_input_fn = utils.setup_input
        get_data_prefetch_threads_init_fn = utils.get_data_prefetch_threads_init_fn

    stats = Statistics()

    # set up logging
    tf.logging.set_verbosity(tf.logging.ERROR)

    with tf.Graph().as_default() as g:
        # create ops and placeholders
        inputs = setup_input_fn(cfg,
                                is_training=False,
                                use_filename_queue=False)
        RuntimeDeterminedEnviromentVars.load_dynamic_variables(inputs, cfg)
        RuntimeDeterminedEnviromentVars.populate_registered_variables()

        # build model (and losses and train_op)
        # model = utils.setup_model( inputs, cfg, is_training=False )
        loss_names = [avg_img_to_loss_type(args.avg_type, given_task)]  # Keep format the same as extract_losses.py
        loss_fn = get_loss_op(loss_names[0])

        # execute training
        start_time = time.time()
        max_steps = get_max_steps(inputs['max_steps'], args.data_split)
        utils.print_start_info(cfg, max_steps, is_training=False)

        # start session and restore model
        training_runners = {
            'sess': tf.Session(),
            'coord': tf.train.Coordinator()
        }
        try:
            utils.print_start_info(cfg, max_steps, is_training=False)

            data_prefetch_init_fn = get_data_prefetch_threads_init_fn(
                inputs, cfg, is_training=False, use_filename_queue=False)
            #training_runners[ 'threads' ] = data_prefetch_init_fn( training_runners[ 'sess' ], training_runners[ 'coord' ] )
            prefetch_threads = threading.Thread(
                target=data_prefetch_init_fn,
                args=(training_runners['sess'], training_runners['coord']))
            prefetch_threads.start()

            # run one example so that we can calculate some statistics about the representations
            filenames = []
            loss_names_to_vals = {name: [] for name in loss_names}
            start = time.perf_counter()

            print_every = int(args.print_every)
            # run the remaining examples
            for step in range(max_steps):
                data_idx, target, mask = training_runners['sess'].run([
                    inputs['data_idxs'], inputs['target_batch'],
                    inputs['mask_batch']
                ])
                loss = loss_fn(avg_img, target, mask)
                # print(loss)
                assert np.isfinite(loss) and loss >= 0.0
                loss_names_to_vals[loss_names[0]].append(loss)
                filenames.extend(data_idx)
                stats.push(loss)

                if step % print_every == 0 and step > 0:
                    print(
                        'Step {0} of {1}: (Mean {5}: {2:.3f} || stddev: {3:.3f}) :: ({4:.2f} secs/step)'
                        .format(step, max_steps - 1, stats.mean(),
                                np.sqrt(stats.variance()),
                                (time.perf_counter() - start) / print_every,
                                loss_names[0]))
                    start = time.perf_counter()
                if training_runners['coord'].should_stop():
                    break

            print(
                'The size of losses is %s while we expect it to run for %d steps with batchsize %d'
                % (len(filenames), inputs['max_steps'], cfg['batch_size']))

            end_train_time = time.time() - start_time
            if args.out_name:
                out_name = args.out_name
            else:
                if args.data_split == "val":
                    split_name = "train"
                elif args.data_split == "test":
                    split_name = "val"
                else:
                    raise ValueError(
                        "Cannot adequately name output for data split {}".
                        format(args.data_split))
                out_name = '{avg_type}__{task}_{split}_losses.pkl'.format(
                    task=given_task,
                    split=split_name,
                    avg_type="marginal"
                    if args.avg_type == 'dense_marginal' else args.avg_type)
            save_path = os.path.join(save_dir, out_name)

            with open(save_path, 'wb') as f:
                loss_names_to_vals['file_indexes'] = filenames
                loss_names_to_vals['global_step'] = 0
                if 'dense_xentropy_loss' in loss_names_to_vals:
                    loss_names_to_vals['xentropy_loss'] = loss_names_to_vals[
                        'dense_xentropy_loss']
                    del loss_names_to_vals['dense_xentropy_loss']
                pickle.dump(loss_names_to_vals, f)

            if args.out_dir:
                os.makedirs(args.out_dir, exist_ok=True)
                os.system("sudo mv {fp} {out}/".format(fp=save_path,
                                                       out=args.out_dir))
            else:
                if transfer:
                    os.makedirs('/home/ubuntu/s3/model_log/losses_transfer/',
                                exist_ok=True)
                    os.system(
                        "sudo mv {fp} /home/ubuntu/s3/model_log/losses_transfer/"
                        .format(fp=save_path))
                else:
                    os.makedirs('/home/ubuntu/s3/model_log/losses/',
                                exist_ok=True)
                    os.system("sudo mv {fp} /home/ubuntu/s3/model_log/losses/".
                              format(fp=save_path))

            print('saved losses to {0}'.format(save_path))
            print('time to extract %d epochs: %.3f hrs' %
                  (cfg['num_epochs'], end_train_time / (60 * 60)))
        finally:
            utils.request_data_loading_end(training_runners)
            utils.end_data_loading_and_sess(training_runners)
Example #13
def run_extract_representations( args, cfg ):
    # set up logging
    tf.logging.set_verbosity( tf.logging.INFO )

    with tf.Graph().as_default() as g:
        cfg['randomize'] = False
        cfg['num_epochs'] = 1
        # cfg['num_read_threads'] = 5
        # cfg['batch_size']=2
        #if cfg['model_path'] is None:
        #    cfg['model_path'] = tf.train.latest_checkpoint( os.path.join( args.cfg_dir, "logs/slim-train/" ) )
        cfg['model_path'] = os.path.join( args.cfg_dir, "logs/slim-train/model.ckpt-59690")
        # create ops and placeholders
        tf.logging.set_verbosity( tf.logging.INFO )
        inputs = utils.setup_input( cfg, is_training=False, use_filename_queue=True )
        RuntimeDeterminedEnviromentVars.load_dynamic_variables( inputs, cfg )
        RuntimeDeterminedEnviromentVars.populate_registered_variables()
        
        # build model (and losses and train_op)
        model = utils.setup_model( inputs, cfg, is_training=False )

        # set up metrics to evaluate
        names_to_values, names_to_updates = setup_metrics( inputs, model, cfg )

        # execute training 
        start_time = time.time()
        utils.print_start_info( cfg, inputs[ 'max_steps' ], is_training=False )

        # start session and restore model
        training_runners = { 'sess': tf.Session(), 'coord': tf.train.Coordinator() }
        try:
            if cfg['model_path'] is None:
                print('Please specify a checkpoint directory')
                return	
            
            model[ 'saver_op' ].restore( training_runners[ 'sess' ], cfg[ 'model_path' ] )
            
            utils.print_start_info( cfg, inputs[ 'max_steps' ], is_training=False )

            data_prefetch_init_fn = utils.get_data_prefetch_threads_init_fn( inputs, cfg, is_training=False, use_filename_queue=True )
            training_runners[ 'threads' ] = data_prefetch_init_fn( training_runners[ 'sess' ], training_runners[ 'coord' ] )
            
            # run one example so that we can calculate some statistics about the representations
            filenames = []
            representations, data_idx = training_runners['sess'].run( [ 
                    model['model'].encoder_output, inputs[ 'data_idxs' ] ] )        
            filenames += [ inputs[ 'filepaths_list'][ i ] for i in data_idx ]
            print( 'Got first batch representation with size: {0}'.format( representations.shape ) )

            # run the remaining examples
            for step in xrange( inputs[ 'max_steps' ] - 1 ):
                if step % 100 == 0: 
                    print( 'Step {0} of {1}'.format( step, inputs[ 'max_steps' ] - 1 ))
                encoder_output, data_idx = training_runners['sess'].run( [
                        model['model'].encoder_output, inputs[ 'data_idxs' ] ] )        
                representations = np.append(representations, encoder_output, axis=0)
                filenames += [ inputs[ 'filepaths_list'][ i ] for i in data_idx ]

                if training_runners['coord'].should_stop():
                    break

            print('The size of representations is %s while we expect it to run for %d steps with batchsize %d' % (representations.shape, inputs['max_steps'], cfg['batch_size']))

            end_train_time = time.time() - start_time
            save_path = os.path.join( args.cfg_dir, '../representations.pkl' )
            with open( save_path, 'wb' ) as f:
                pickle.dump( { 'filenames': filenames, 'representations': representations }, f )
            print( 'saved representations to {0}'.format( save_path ))
            print('time to train %d epochs: %.3f hrs' % (cfg['num_epochs'], end_train_time/(60*60)))
            print('avg time per epoch: %.3f hrs' % ( (end_train_time/(60*60)) / cfg['num_epochs']) )
        finally:
            utils.request_data_loading_end( training_runners )
            utils.end_data_loading_and_sess( training_runners )
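
Example #13 pickles a dict with the keys 'filenames' and 'representations'. A minimal sketch of reading that file back (the path is illustrative; the example writes to os.path.join(args.cfg_dir, '../representations.pkl')):

import pickle

with open('representations.pkl', 'rb') as f:
    saved = pickle.load(f)

filenames = saved['filenames']              # one file path per extracted example
representations = saved['representations']  # ndarray whose first axis indexes those examples
assert len(filenames) == representations.shape[0]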
Example #14
def run_extract_representations( args, cfg, file_to_process):
    setup_input_fn = utils.setup_input
    # set up logging
    tf.logging.set_verbosity( tf.logging.INFO )

    with tf.Graph().as_default() as g:
        # create ops and placeholders
        tf.logging.set_verbosity( tf.logging.INFO )
        inputs = {}
        inputs['input_batch'] = tf.placeholder( tf.float32, shape=[1,224,224,3], name='input_placeholder')
        inputs['target_batch'] = tf.placeholder( tf.float32, shape=[1,1000], name='target_placeholder' )
        inputs['mask_batch'] = tf.placeholder( tf.float32, shape=[1], name='mask_placeholder' )
        inputs['data_idxs'] = tf.placeholder( tf.int32, shape=[1], name='data_idx_placeholder')
        inputs['num_samples_epoch'] = len(file_to_process) 
        inputs['max_steps'] = len(file_to_process) 
        
        RuntimeDeterminedEnviromentVars.load_dynamic_variables( inputs, cfg )
        RuntimeDeterminedEnviromentVars.populate_registered_variables()
        
        # build model (and losses and train_op)
        model = utils.setup_model( inputs, cfg, is_training=False )
        m = model['model']

        # execute training 
        utils.print_start_info( cfg, inputs[ 'max_steps' ], is_training=False )

        # start session and restore model
        training_runners = { 'sess': tf.Session(), 'coord': tf.train.Coordinator() }
        try:
            if cfg['model_path'] is None:
                print('Please specify a checkpoint directory')
                return	
            
            to_restore = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)  
            for v in tuple(to_restore):     
                if 'global_step' in v.name:
                    to_restore.remove(v)
                            
            saver_for_kd = tf.train.Saver(to_restore)
            saver_for_kd.restore( training_runners[ 'sess' ], cfg[ 'model_path' ] )
            #model[ 'saver_op' ].restore( training_runners[ 'sess' ], cfg[ 'model_path' ] )

            for step, filename in enumerate(file_to_process):
                start_time = time.time()
                if step % 100 == 0: 
                    print( 'Step {0} of {1}'.format( step, inputs[ 'max_steps' ] - 1 ))
                m,p,v = filename.decode('UTF-8').split('/')    
                print(filename)
                img_name = '/home/ubuntu/s3/{}/rgb/point_{}_view_{}_domain_rgb.png'.format(m, p, v)
                sfm_dir = 's3://taskonomy-unpacked-oregon/{}/softmax_1000'.format(m)
                os.system('sudo mkdir -p /home/ubuntu/s3/{}/softmax_1000/'.format(m))
                os.system('mkdir -p /home/ubuntu/temp/{}/'.format(m))
                npy_name = 'point_{}_view_{}.npy'.format(p, v)
                if os.path.isfile('/home/ubuntu/s3/{}/softmax_1000/{}'.format(m, npy_name)):
                    continue
                if not os.path.isfile(img_name):
                    continue
                img = skimage.io.imread(img_name, as_grey=False)
                img = resize_rescale_imagenet(img, new_dims=(224,224))
                img = np.reshape(img, (1,224,224,3))
                feed_dict = {inputs['input_batch'] : img}
                predicted = training_runners['sess'].run( model['model'].encoder_output, feed_dict=feed_dict )
                # maxs = np.amax(predicted, axis=-1)
                # softmax = np.exp(predicted - np.expand_dims(maxs, axis=-1))
                # sums = np.sum(softmax, axis=-1)
                # softmax = softmax / np.expand_dims(sums, -1)
                # print(softmax)
                # pdb.set_trace()
                local_npy = os.path.join('/home/ubuntu/temp/{}'.format(m), npy_name) 
                with open(local_npy, 'wb') as fp:
                    np.save(fp, predicted)
                os.system('aws s3 mv {} {}/'.format(local_npy, sfm_dir))
                if training_runners['coord'].should_stop():
                    break
                end_train_time = time.time() - start_time
                print('time to extract: %.3f s' % end_train_time)

        finally:
            utils.request_data_loading_end( training_runners )
            utils.end_data_loading_and_sess( training_runners )
Example #15
def run_rand_baseline( args, cfg, given_task ):
    # set up logging
    tf.logging.set_verbosity( tf.logging.INFO )

    with tf.Graph().as_default() as g:
        # create ops and placeholders
        tf.logging.set_verbosity( tf.logging.INFO )
        inputs = utils.setup_input( cfg, is_training=False, use_filename_queue=False )
        RuntimeDeterminedEnviromentVars.load_dynamic_variables( inputs, cfg )
        RuntimeDeterminedEnviromentVars.populate_registered_variables()
        
        # build model (and losses and train_op)
        model = utils.setup_model( inputs, cfg, is_training=False )

        # set up metrics to evaluate
        names_to_values, names_to_updates = setup_metrics( inputs, model, cfg )

        # execute training 
        start_time = time.time()
        utils.print_start_info( cfg, inputs[ 'max_steps' ], is_training=False )

        # start session and restore model
        training_runners = { 'sess': tf.Session(), 'coord': tf.train.Coordinator() }
        try:
            
            utils.print_start_info( cfg, inputs[ 'max_steps' ], is_training=False )

            data_prefetch_init_fn = utils.get_data_prefetch_threads_init_fn( inputs, cfg, is_training=False, use_filename_queue=False )
            #training_runners[ 'threads' ] = data_prefetch_init_fn( training_runners[ 'sess' ], training_runners[ 'coord' ] )
            prefetch_threads = threading.Thread(
                target=data_prefetch_init_fn,
                args=( training_runners[ 'sess' ], training_runners[ 'coord' ] ))
            prefetch_threads.start()
            
            # run one example so that we can calculate some statistics about the representations
            targets = training_runners['sess'].run( inputs[ 'target_batch' ] )         
       
            # run the remaining examples
            for step in range( inputs[ 'max_steps' ] - 1 ):
            #for step in range( 10 ):
                if step % 100 == 0: 
                    print( 'Step {0} of {1}'.format( step, inputs[ 'max_steps' ] - 1 ))
               
                target = training_runners['sess'].run( inputs[ 'target_batch' ] )  
                targets = np.append( targets, target, axis=0)

                if training_runners['coord'].should_stop():
                    break

            rand_idx = [random.randint(0, targets.shape[0] - 1) for i in range(targets.shape[0])] 
            rand_target = [targets[i] for i in rand_idx]
            rand_target = np.vstack(rand_target)

            counter = 0
            loss_sum = 0  # renamed from 'sum' to avoid shadowing the built-in
            for step in range( inputs[ 'max_steps' ] - 1 ):
            #for step in range( 10 ):
                if step % 100 == 0: 
                    print( 'Step {0} of {1}'.format( step, inputs[ 'max_steps' ] - 1 ))
               
                tar = targets[step*cfg['batch_size']:(step+1)*cfg['batch_size']]
                rand = rand_target[step*cfg['batch_size']:(step+1)*cfg['batch_size']]

                losses = training_runners['sess'].run( model['model'].losses, feed_dict={
                    inputs['target_batch']: tar, model['model'].final_output:rand})
                loss_sum += losses[0]
                counter += 1
                
                if training_runners['coord'].should_stop():
                    break

            print(loss_sum)
            print(counter)
            print('random_baseline has loss: {loss}'.format(loss=loss_sum/counter))
            end_train_time = time.time() - start_time
            
        finally:
            utils.request_data_loading_end( training_runners )
            utils.end_data_loading_and_sess( training_runners )
Example #16
def run_to_task(task_to):
    import general_utils
    from general_utils import RuntimeDeterminedEnviromentVars
    import models.architectures as architectures
    from data.load_ops import resize_rescale_image
    import utils
    from data.task_data_loading import load_and_specify_preprocessors_for_representation_extraction
    import lib.data.load_ops as load_ops
    from importlib import reload
    import tensorflow as tf
    tf.logging.set_verbosity(tf.logging.ERROR)

    # for arch in ['regular', 'shallow', 'dilated_shallow', 'dilated_regular']:
    arch = args.arch
    data_amount = args.data
    if args.second_order:
        global TRANSFER_TYPE
        TRANSFER_TYPE = 'second_order'
    # Declared here, before any assignment below, to avoid a
    # "name assigned to before global declaration" error in Python 3.
    global list_of_src_tasks
    if not args.no_regenerate_data:
        #if False:
        all_outputs = {}
        pickle_dir = 'viz_{task_to}_transfer_{hs}_{arch}.pkl'.format(
            arch=arch, hs=args.hs, task_to=task_to)
        subprocess.call(
            "aws s3 cp s3://task-preprocessing-512-oregon/visualizations/transfer_viz/viz_{}.pkl {}"
            .format(task_to, pickle_dir),
            shell=True)
        import os
        if os.path.isfile(pickle_dir):
            with open(pickle_dir, 'rb') as fp:
                all_outputs = pickle.load(fp)

        if args.second_order:
            import itertools
            with open(
                    '/home/ubuntu/task-taxonomy-331b/tools/ranked_first_order_transfers.pkl',
                    'rb') as fp:
                data = pickle.load(fp)
                list_of_src_tasks = list(
                    itertools.combinations(data[task_to][:5], 2))
                list_of_src_tasks = [
                    '{}__{}'.format(x[0], x[1]) for x in list_of_src_tasks
                ]
            with open(
                    '/home/ubuntu/task-taxonomy-331b/tools/second_order_should_flip.pkl',
                    'rb') as fp:
                to_flip_dict = pickle.load(fp)
            if args.find_all_src:
                config_dir_root = '/home/ubuntu/task-taxonomy-331b/experiments/second_order/{arch}/{data}'.format(
                    arch=arch, data=data_amount)
                all_configs = os.listdir(config_dir_root)
                list_of_src_tasks = []
                for i in all_configs:
                    if i.split('__')[-3] == task_to:
                        list_of_src_tasks.append(i)
                list_of_src_tasks = [
                    '__'.join(x.split('__')[:2]) for x in list_of_src_tasks
                ]
                rank_combo = {}
                for task_from in list_of_src_tasks:
                    first, sec = task_from.split('__')
                    rank_combo[task_from] = (data[task_to].index(first),
                                             data[task_to].index(sec))
        for i, task_from in enumerate(list_of_src_tasks):
            if args.data == '16k':
                if task_from in all_outputs:
                    print("{} already exists....\n\n\n".format(task_from))
                    continue
            else:
                if '{}_{}'.format(task_from, args.data) in all_outputs:
                    print("{} already exists....\n\n\n".format(task_from))
                    continue

            print("Doing from {task_from} to {task_to}".format(
                task_from=task_from, task_to=task_to))
            general_utils = importlib.reload(general_utils)
            tf.reset_default_graph()
            training_runners = {
                'sess': tf.InteractiveSession(),
                'coord': tf.train.Coordinator()
            }

            if task_from == "FULL" or task_from == "FULL_IMAGE":
                task = '{f}__{t}__{hs}__unlocked'.format(f="FULL",
                                                         t=task_to,
                                                         hs=args.hs)
                transfer_src = 'full_order' if task_from == "FULL" else 'full_order_image'
                CONFIG_DIR = '/home/ubuntu/task-taxonomy-331b/experiments/{transfer_type}/{arch}/{data}/{TASK}'.format(
                    transfer_type=transfer_src,
                    arch=arch,
                    data=data_amount,
                    TASK=task)
            elif task_from == "FULL_select" or task_from == "FULL_select_IMAGE":
                task = '{f}__{t}__{hs}__unlocked'.format(f="FULL_select",
                                                         t=task_to,
                                                         hs=args.hs)
                transfer_src = 'full_order_selected' if task_from == "FULL_select" else 'full_order_selected_image'
                CONFIG_DIR = '/home/ubuntu/task-taxonomy-331b/experiments/{transfer_type}/{arch}/{data}/{TASK}'.format(
                    transfer_type=transfer_src,
                    arch=arch,
                    data=data_amount,
                    TASK=task)
            else:
                task = '{f}__{t}__{hs}__unlocked'.format(f=task_from,
                                                         t=task_to,
                                                         hs=args.hs)

                CONFIG_DIR = '/home/ubuntu/task-taxonomy-331b/experiments/{transfer_type}/{arch}/{data}/{TASK}'.format(
                    transfer_type=TRANSFER_TYPE,
                    arch=arch,
                    data=data_amount,
                    TASK=task)
            print(CONFIG_DIR)

            ############## Load Configs ##############
            cfg = utils.load_config(CONFIG_DIR, nopause=True)
            RuntimeDeterminedEnviromentVars.register_dict(cfg)
            if args.second_order and not args.find_all_src and not to_flip_dict[
                    task]:
                cfg['val_representations_file'] = cfg[
                    'val_representations_file'][::-1]
            cfg['num_epochs'] = 1
            cfg['randomize'] = False
            root_dir = cfg['root_dir']
            cfg['num_read_threads'] = 1
            cfg['model_path'] = tf.train.latest_checkpoint(
                os.path.join(cfg['log_root'], 'logs', 'slim-train'
                             #'time'
                             ))
            # print(cfg['model_path'])
            if cfg['model_path'] is None and task == 'random':
                cfg['model_path'] = tf.train.latest_checkpoint(
                    os.path.join(cfg['log_root'], 'logs', 'slim-train',
                                 'time'))
            if cfg['model_path'] is None:
                continue

            ############## Set Up Inputs ##############
            # tf.logging.set_verbosity( tf.logging.INFO )
            inputs = utils.setup_input_transfer(
                cfg, is_training=ON_TEST_SET, use_filename_queue=False
            )  # is_training determines whether to use train/validation
            RuntimeDeterminedEnviromentVars.load_dynamic_variables(inputs, cfg)
            RuntimeDeterminedEnviromentVars.populate_registered_variables()
            start_time = time.time()
            # utils.print_start_info( cfg, inputs[ 'max_steps' ], is_training=False )

            ############## Set Up Model ##############
            model = utils.setup_model(inputs, cfg, is_training=True)
            m = model['model']
            model['saver_op'].restore(training_runners['sess'],
                                      cfg['model_path'])

            ############## Start dataloading workers ##############
            data_prefetch_init_fn = utils.get_data_prefetch_threads_init_fn_transfer(
                inputs, cfg, is_training=ON_TEST_SET, use_filename_queue=False)

            prefetch_threads = threading.Thread(
                target=data_prefetch_init_fn,
                args=(training_runners['sess'], training_runners['coord']))
            prefetch_threads.start()

            ############## Run First Batch ##############
            (
                input_batch,
                representation_batch,
                target_batch,
                data_idx,
                encoder_output,
                predicted,
                loss,
            ) = training_runners['sess'].run([
                m.input_images, m.input_representations, m.decoder.targets,
                model['data_idxs'], m.encoder_output, m.decoder.decoder_output,
                m.total_loss
            ])
            if task_to == 'segment2d' or task_to == 'segment25d':
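                # The segmentation embeddings are 64-d per pixel; project them onto
                # 3 PCA components and min-max normalize so they can be shown as RGB.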
                from sklearn.decomposition import PCA
                x = np.zeros((32, 256, 256, 3), dtype='float')
                for i in range(predicted.shape[0]):
                    embedding_flattened = np.squeeze(predicted[i]).reshape(
                        (-1, 64))
                    pca = PCA(n_components=3)
                    pca.fit(embedding_flattened)
                    lower_dim = pca.transform(embedding_flattened).reshape(
                        (256, 256, -1))
                    lower_dim = (lower_dim - lower_dim.min()) / (
                        lower_dim.max() - lower_dim.min())
                    x[i] = lower_dim
                predicted = x
            if task_to == 'segmentsemantic_rb':
                predicted = np.argmax(predicted, axis=-1)
            ############## Clean Up ##############
            training_runners['coord'].request_stop()
            training_runners['coord'].join()

            # if os.path.isfile(pickle_dir):
            #     with open(pickle_dir, 'rb') as fp:
            #         all_outputs = pickle.load(fp)

            ############## Store to dict ##############
            to_store = {
                'data_idx': data_idx,
                'output': predicted,
                'loss': loss
            }
            if args.second_order and args.find_all_src:
                store_key = "{}_{}".format(task_from[:20],
                                           rank_combo[task_from])
                all_outputs[store_key] = to_store
            elif args.data != '16k':
                store_key = "{}_{}".format(task_from, args.data)
                all_outputs[store_key] = to_store
            else:
                all_outputs[task_from] = to_store

            # os.system("sudo cp {d} /home/ubuntu/s3/model_log".format(d=pickle_dir))

            ############## Reset graph and paths ##############
            tf.reset_default_graph()
            training_runners['sess'].close()
            #print(sys.modules.keys())
            #del sys.modules[ 'config' ]
            sys.path = remove_dups(sys.path)
            print('Current Directory: ', os.getcwd())
            pickle_dir = 'viz_{task_to}_transfer_{hs}_{arch}.pkl'.format(
                arch=arch, hs=args.hs, task_to=task_to)
            with open(pickle_dir, 'wb') as fp:
                pickle.dump(all_outputs, fp)
            subprocess.call(
                "aws s3 cp {} s3://task-preprocessing-512-oregon/visualizations/transfer_viz/viz_{}.pkl"
                .format(pickle_dir, task_to),
                shell=True)

    # Run jupyter nb
    print('Running Jupyter Notebooks...')
    #os.makedirs("/home/ubuntu/task-taxonomy-331b/notebooks/transfer_viz/transfer_{hs}_{arch}".format(hs=args.hs, arch=arch), exist_ok=True)
    notebook_path = '/home/ubuntu/task-taxonomy-331b/notebooks/transfer_viz/Visual_{task_to}'.format(
        task_to=task_to)
    if args.second_order and not args.find_all_src:
        notebook_path = '{}-Copy1'.format(notebook_path)
    subprocess.call("jupyter nbconvert \
            --execute {notebook_path}.ipynb \
            --to html \
            --ExecutePreprocessor.kernel_name=python3 \
            --ExecutePreprocessor.timeout=1200 ".format(
        notebook_path=notebook_path, arch=arch, hs=args.hs, task_to=task_to),
                    shell=True)
    subprocess.call(
        "aws s3 cp {}.html s3://task-preprocessing-512-oregon/visualizations/{}/"
        .format(notebook_path, TRANSFER_TYPE),
        shell=True)
def deep_attribution():
    import general_utils
    from general_utils import RuntimeDeterminedEnviromentVars
    tf.logging.set_verbosity(tf.logging.ERROR)

    imlist_file_path = os.path.join(prj_dir, args.explain_result_root,
                                    args.dataset, 'imlist.txt')
    explain_result_root = os.path.join(prj_dir, args.explain_result_root,
                                       args.dataset)
    with open(imlist_file_path) as f:
        lines = []
        line = f.readline().strip()
        while len(line) != 0:
            lines += [os.path.join(prj_dir, 'dataset', args.dataset, line)]
            line = f.readline().strip()
    explain_methods = ['saliency', 'grad*input', 'elrp']
    for task in list_of_tasks:
        if not os.path.exists(os.path.join(explain_result_root, task)):
            os.mkdir(os.path.join(explain_result_root, task))
        cfg = generate_cfg(task)
        print("Doing {task}".format(task=task))
        general_utils = importlib.reload(general_utils)

        tf.reset_default_graph()
        sess = tf.Session()
        training_runners = {'sess': sess, 'coord': tf.train.Coordinator()}
        with DeepExplain(session=sess, graph=sess.graph) as de:
            ############## Set Up Inputs ##############
            setup_input_fn = utils.setup_input
            inputs = setup_input_fn(cfg,
                                    is_training=False,
                                    use_filename_queue=False)
            RuntimeDeterminedEnviromentVars.load_dynamic_variables(inputs, cfg)
            RuntimeDeterminedEnviromentVars.populate_registered_variables()
            ############## Set Up Model ##############
            model = utils.setup_model(inputs, cfg, is_training=False)
            m = model['model']
            model['saver_op'].restore(training_runners['sess'],
                                      cfg['model_path'])
            encoder_endpoints = model['model'].encoder_endpoints
            endpoints = encoder_endpoints

        print('There are {} images in {}'.format(len(lines), imlist_file_path))
        img = load_raw_image_center_crop(lines[0])
        img = skimage.img_as_float(img)
        low_sat_tasks = 'autoencoder curvature denoise edge2d edge3d \
                    keypoint2d keypoint3d \
                    reshade rgb2depth rgb2mist rgb2sfnorm \
                    segment25d segment2d room_layout'.split()
        if task in low_sat_tasks:
            cfg['input_preprocessing_fn'] = load_ops.resize_rescale_image_low_sat
        img = cfg['input_preprocessing_fn'](
            img, **cfg['input_preprocessing_fn_kwargs'])
        imgs = np.zeros([args.imlist_size, 1] + list(img.shape), float)
        for line_i in range(args.imlist_size):
            img = load_raw_image_center_crop(lines[line_i])
            img = skimage.img_as_float(img)
            if task not in list_of_tasks:
                raise ValueError('Task not supported')
            if task in low_sat_tasks:
                cfg['input_preprocessing_fn'] = load_ops.resize_rescale_image_low_sat

            if task == 'jigsaw':
                img = cfg['input_preprocessing_fn'](
                    img,
                    target=cfg['target_dict'][random.randint(0, 99)],
                    **cfg['input_preprocessing_fn_kwargs'])
            else:
                img = cfg['input_preprocessing_fn'](
                    img, **cfg['input_preprocessing_fn_kwargs'])

            imgs[line_i, :] = img[np.newaxis, :]

        elrp = np.zeros([args.imlist_size] + list(img.shape), float)
        saliency = np.zeros([args.imlist_size] + list(img.shape), float)
        gradXinput = np.zeros([args.imlist_size] + list(img.shape), float)
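        # For every image, run the encoder and compute three DeepExplain attribution
        # maps (saliency, grad*input, epsilon-LRP) of the encoder output w.r.t. the
        # input pixels. The session and graph are rebuilt every 5 images, presumably
        # to keep the accumulated DeepExplain ops from growing the graph unboundedly.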
        for im_i in range(args.imlist_size):
            with DeepExplain(session=sess) as de:
                representation = training_runners['sess'].run(
                    [endpoints['encoder_output']],
                    feed_dict={m.input_images: imgs[im_i]})
                attributions = {
                    explain_method:
                    de.explain(explain_method, endpoints['encoder_output'],
                               m.input_images, imgs[im_i])
                    for explain_method in explain_methods
                }
            elrp[im_i] = attributions['elrp']
            saliency[im_i] = attributions['saliency']
            gradXinput[im_i] = attributions['grad*input']
            print('{} images done.'.format(im_i + 1))
            if ((im_i + 1) % 5) == 0:
                ############## Clean Up ##############
                training_runners['coord'].request_stop()
                training_runners['coord'].join()
                ############## Reset graph and paths ##############
                tf.reset_default_graph()
                training_runners['sess'].close()
                sess = tf.Session()
                training_runners = {
                    'sess': sess,
                    'coord': tf.train.Coordinator()
                }
                with DeepExplain(session=sess, graph=sess.graph) as de:
                    ############## Set Up Inputs ##############
                    setup_input_fn = utils.setup_input
                    inputs = setup_input_fn(cfg,
                                            is_training=False,
                                            use_filename_queue=False)
                    RuntimeDeterminedEnviromentVars.load_dynamic_variables(
                        inputs, cfg)
                    RuntimeDeterminedEnviromentVars.populate_registered_variables(
                    )
                    ############## Set Up Model ##############
                    model = utils.setup_model(inputs, cfg, is_training=False)
                    m = model['model']
                    model['saver_op'].restore(training_runners['sess'],
                                              cfg['model_path'])
                    encoder_endpoints = model['model'].encoder_endpoints
                    endpoints = encoder_endpoints

        np.save(os.path.join(explain_result_root, task, 'elrp.npy'), elrp)
        np.save(os.path.join(explain_result_root, task, 'saliency.npy'),
                saliency)
        np.save(os.path.join(explain_result_root, task, 'gradXinput.npy'),
                gradXinput)
        ############## Clean Up ##############
        training_runners['coord'].request_stop()
        training_runners['coord'].join()
        ############## Reset graph and paths ##############
        tf.reset_default_graph()
        training_runners['sess'].close()
        print('Task {} Done!'.format(task))
    print('All Done.')
    return
def run_to_task(task_to):

    import general_utils
    from   general_utils import RuntimeDeterminedEnviromentVars
    import models.architectures as architectures
    from   data.load_ops import resize_rescale_image
    import utils
    from   data.task_data_loading import load_and_specify_preprocessors_for_representation_extraction
    import lib.data.load_ops as load_ops
    import pdb
    global synset
    synset_1000 = [" ".join(i.split(" ")[1:]) for i in synset]
    select = np.asarray([ 0.,  0.,  0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
        0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.,  0.,  1.,
        1.,  0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.,  0.,
        0.,  0.,  0.,  0.,  1.,  0.,  1.,  0.,  0.,  0.,  0.,  0.,  1.,
        1.,  0.,  0.,  0.,  1.,  0.,  0.,  0.,  0.,  1.,  0.,  1.,  0.,
        0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
        0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.,  1.,  0.,
        0.,  0.,  1.,  0.,  1.,  0.,  0.,  0.,  0.,  1.,  0.,  1.,  0.,
        0.,  0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
        0.,  0.,  0.,  0.,  1.,  0.,  0.,  1.,  0.,  1.,  0.,  0.,  1.,
        0.,  1.,  0.,  1.,  0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
        0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
        1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.,  0.,  0.,  0.,
        0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.,  1.,  0.,  1.,  0.,  0.,
        1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
        0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.,  0.,  0.,  0.,  0.,
        1.,  0.,  0.,  0.,  0.,  0.,  0.,  1.,  0.,  1.,  0.,  0.,  0.,
        0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.,  0.,  0.,  0.,  0.,  0.,
        0.,  0.,  0.,  0.,  0.,  0.,  1.,  1.,  0.,  0.,  1.,  0.,  1.,
        0.,  1.,  0.,  0.,  0.,  0.,  1.,  0.,  1.,  0.,  0.,  0.,  0.,
        0.,  0.,  0.,  0.,  1.,  0.,  0.,  0.,  0.,  1.,  0.,  0.,  0.,
        0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.,  1.,  1.,  0.,  0.,  1.,
        0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.,
        0.,  0.,  0.,  0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.,
        0.,  0.,  0.,  0.,  0.,  1.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,
        0.,  0.,  0.,  1.,  0.,  0.,  0.,  1.,  0.,  0.,  0.,  0.,  0.,
        0.,  0.,  0.,  0.,  0.,  1.,  0.,  0.,  1.,  0.,  0.,  0.,  0.,
        0.,  1.,  0.,  0.,  0.,  0.,  0.,  1.,  0.,  0.,  0.,  0.,  1.,  0.])

    with open('/home/ubuntu/task-taxonomy-331b/lib/data/places_class_names.txt', 'r') as fp:
        synset_places = [x.rstrip()[4:-1] for x,y in zip(fp.readlines(), select) if y == 1.]

    
    tf.logging.set_verbosity(tf.logging.ERROR)
   
    args = parser.parse_args()
    if args.task != 'NONE':
        args.idx = list_of_tasks.index(args.task)
    for idx, task in enumerate(list_of_tasks):
        if idx != args.idx and args.idx != -1:
            continue
        if task == 'class_places':
            synset = synset_places
        elif task == 'class_1000':
            synset = synset_1000
        print("Doing {task}".format(task=task))
        general_utils = importlib.reload(general_utils)
        tf.reset_default_graph()
        training_runners = { 'sess': tf.InteractiveSession(), 'coord': tf.train.Coordinator() }

        # task = '{f}__{t}__{hs}'.format(f=task_from, t=task_to, hs=args.hs)
        CONFIG_DIR = '/home/ubuntu/task-taxonomy-331b/experiments/final/{TASK}'.format(TASK=task)

        ############## Load Configs ##############
        cfg = utils.load_config( CONFIG_DIR, nopause=True )
        RuntimeDeterminedEnviromentVars.register_dict( cfg )
        split_file = os.path.join('/home/ubuntu/task-taxonomy-331b/assets/aws_data/', 'video2_info.pkl')
        cfg['train_filenames'] = split_file
        cfg['val_filenames'] = split_file
        cfg['test_filenames'] = split_file 

        cfg['num_epochs'] = 2
        cfg['randomize'] = False
        root_dir = cfg['root_dir']
        cfg['num_read_threads'] = 1
        print(cfg['log_root'])
        cfg['model_path'] = os.path.join(
                cfg['log_root'],
                task,
                'model.permanent-ckpt'
            )

        print( cfg['model_path'])
        if cfg['model_path'] is None:
            continue
        cfg['dataset_dir'] = '/home/ubuntu'
        cfg['preprocess_fn'] = load_and_specify_preprocessors_for_representation_extraction
        ############## Set Up Inputs ##############
        # tf.logging.set_verbosity( tf.logging.INFO )
        inputs = utils.setup_input( cfg, is_training=ON_TEST_SET, use_filename_queue=False ) # is_training determines whether to use train/validation
        RuntimeDeterminedEnviromentVars.load_dynamic_variables( inputs, cfg )
        RuntimeDeterminedEnviromentVars.populate_registered_variables()
        start_time = time.time()
        # utils.print_start_info( cfg, inputs[ 'max_steps' ], is_training=False )

        ############## Set Up Model ##############
        model = utils.setup_model( inputs, cfg, is_training=IN_TRAIN_MODE )
        m = model[ 'model' ]
        model[ 'saver_op' ].restore( training_runners[ 'sess' ], cfg[ 'model_path' ] )

        ############## Start dataloading workers ##############
        data_prefetch_init_fn = utils.get_data_prefetch_threads_init_fn( 
            inputs, cfg, is_training=ON_TEST_SET, use_filename_queue=False )

        prefetch_threads = threading.Thread(
            target=data_prefetch_init_fn,
            args=( training_runners[ 'sess' ], training_runners[ 'coord' ] ))
        prefetch_threads.start()
       
        list_of_fname = np.load('/home/ubuntu/task-taxonomy-331b/assets/aws_data/video2_fname.npy')
        import errno

        try:
            os.mkdir('/home/ubuntu/{}'.format(task))
            os.mkdir('/home/ubuntu/{}/vid1'.format(task))
            os.mkdir('/home/ubuntu/{}/vid2'.format(task))
            os.mkdir('/home/ubuntu/{}/vid3'.format(task))
            os.mkdir('/home/ubuntu/{}/vid4'.format(task))
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        curr_comp = np.zeros((3,64))
        curr_fit_img = np.zeros((256,256,3))
        embeddings = []
        ############## Run First Batch ##############

        for step_num in range(inputs['max_steps'] - 1):
        #for step_num in range(1):
            #if step_num > 0 and step_num % 20 == 0:
            print(step_num)
            if not hasattr(m, 'masks'):
                ( 
                    input_batch, target_batch, 
                    data_idx, 
                    predicted, loss,
                ) = training_runners['sess'].run( [ 
                    m.input_images, m.targets,
                    model[ 'data_idxs' ], 
                    m.decoder_output, m.total_loss] )
                mask_batch = 1.
            else:
                ( 
                    input_batch, target_batch, mask_batch,
                    data_idx, 
                    predicted, loss,
                ) = training_runners['sess'].run( [ 
                    m.input_images, m.targets, m.masks,
                    model[ 'data_idxs' ], 
                    m.decoder_output, m.total_loss] )

            if task == 'segment2d' or task == 'segment25d':
                from sklearn.decomposition import PCA  
                x = np.zeros((32,256,256,3), dtype='float')
                k_embed = 8
#                 for i in range(predicted.shape[0]):
                    # embedding_flattened = np.squeeze(predicted[i]).reshape((-1,64))
                    # pca = PCA(n_components=3)
                    # pca.fit(embedding_flattened)
                    # min_order = None
                    # min_dist = float('inf')
                    # for order in itertools.permutations([0,1,2]):
                        # reordered = pca.components_[list(order), :]
                        # dist = np.linalg.norm(curr_comp-reordered)
                        # if dist < min_dist:
                            # min_order = list(order)
                            # min_dist = dist
                    # print(min_order)
                    # pca.components_ = pca.components_[min_order, :]
                    # curr_comp = pca.components_
                    # lower_dim = pca.transform(embedding_flattened).reshape((256,256,-1))
                    # lower_dim = (lower_dim - lower_dim.min()) / (lower_dim.max() - lower_dim.min())
                    # x[i] = lower_dim
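                # Fit PCA on a sliding window of recent embeddings, then try every
                # permutation of the 3 components and keep the one whose RGB
                # projection is closest to the previous frame, so colors stay
                # temporally consistent across video frames.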
                for i in range(predicted.shape[0]):
                    embedding_flattened = np.squeeze(predicted[i]).reshape((-1,64))
                    embeddings.append(embedding_flattened)
                    if len(embeddings) > k_embed:
                        embeddings.pop(0)
                    pca = PCA(n_components=3)
                    pca.fit(np.vstack(embeddings))
                    min_order = None
                    min_dist = float('inf')
                    copy_of_comp = np.copy(pca.components_)
                    for order in itertools.permutations([0,1,2]):
                        #reordered = pca.components_[list(order), :]
                        #dist = np.linalg.norm(curr_comp-reordered)
                        pca.components_ = copy_of_comp[order, :]
                        lower_dim = pca.transform(embedding_flattened).reshape((256,256,-1))
                        lower_dim = (lower_dim - lower_dim.min()) / (lower_dim.max() - lower_dim.min())
                        dist = np.linalg.norm(lower_dim - curr_fit_img)
                        if dist < min_dist:
                            min_order = order 
                            min_dist = dist
                    pca.components_ = copy_of_comp[min_order, :]
                    lower_dim = pca.transform(embedding_flattened).reshape((256,256,-1))
                    lower_dim = (lower_dim - lower_dim.min()) / (lower_dim.max() - lower_dim.min())
                    curr_fit_img = np.copy(lower_dim)
                    x[i] = lower_dim
                predicted = x
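            # Undo the curvature normalization (per-channel mean/std) and pin two
            # corner pixels to 0/1 so the image writer does not rescale intensities.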
            if task == 'curvature':
                std = [31.922, 21.658]
                mean = [123.572, 120.1]
                predicted = (predicted * std) + mean
                predicted[:,0,0,:] = 0.
                predicted[:,1,0,:] = 1.
                predicted = np.squeeze(np.clip(predicted.astype(int) / 255., 0., 1. )[:,:,:,0])

            just_rescale = ['autoencoder', 'denoise', 'edge2d', 
                            'edge3d', 'keypoint2d', 'keypoint3d',
                            'reshade', 'rgb2sfnorm']
            if task in just_rescale:
                predicted = (predicted + 1.) / 2.
                predicted = np.clip(predicted, 0., 1.)
                predicted[:,0,0,:] = 0.
                predicted[:,1,0,:] = 1.


            just_clip = ['rgb2depth', 'rgb2mist']
            if task in just_clip:
                predicted[:,0,0,:] = 0.
                predicted[:,1,0,:] = 1.

            if task == 'segmentsemantic_rb':
                label = np.argmax(predicted, axis=-1)
                COLORS = ('white','red', 'blue', 'yellow', 'magenta', 
                        'green', 'indigo', 'darkorange', 'cyan', 'pink', 
                        'yellowgreen', 'black', 'darkgreen', 'brown', 'gray',
                        'purple', 'darkviolet')
                rgb = (input_batch + 1.) / 2.
                preds = [color.label2rgb(np.squeeze(x), np.squeeze(y), colors=COLORS, kind='overlay')[np.newaxis,:,:,:] for x,y in zip(label, rgb)]
                predicted = np.vstack(preds) 

            if task in ['class_1000', 'class_places']:
                for file_idx, predict_output in zip(data_idx, predicted):
                    to_store_name = list_of_fname[file_idx].decode('utf-8').replace('video', task)
                    to_store_name = os.path.join('/home/ubuntu', to_store_name)
                    sorted_pred = np.argsort(predict_output)[::-1]
                    top_5_pred = [synset[sorted_pred[i]] for i in range(5)]
                    to_print_pred = "Top 5 prediction: \n {}\n {}\n {}\n {} \n {}".format(*top_5_pred)
                    img = Image.new('RGBA', (400, 200), (255, 255, 255))
                    d = ImageDraw.Draw(img)
                    fnt = ImageFont.truetype('/usr/share/fonts/truetype/dejavu/DejaVuSerifCondensed.ttf', 25)
                    d.text((20, 5), to_print_pred, fill=(255, 0, 0), font=fnt)
                    img.save(to_store_name, 'PNG')
            else:
                for file_idx, predict_output in zip(data_idx, predicted):
                    to_store_name = list_of_fname[file_idx].decode('utf-8').replace('video', task)
                    to_store_name = os.path.join('/home/ubuntu', to_store_name)
                    scipy.misc.toimage(np.squeeze(predict_output), cmin=0.0, cmax=1.0).save(to_store_name)

        subprocess.call('tar -czvf /home/ubuntu/{t}.tar.gz /home/ubuntu/{t}'.format(t=task), shell=True)
        subprocess.call('aws s3 cp /home/ubuntu/{t}.tar.gz s3://task-preprocessing-512-oregon/video2/'.format(t=task), shell=True)
        subprocess.call('ffmpeg -r 29.97 -f image2 -s 256x256 -i /home/ubuntu/{t}/vid2/020%04d.png -vcodec libx264 -crf 15  -pix_fmt yuv420p {t}_2.mp4'.format(t=task), shell=True)
        subprocess.call('aws s3 cp {t}_2.mp4 s3://task-preprocessing-512-oregon/video2/'.format(t=task), shell=True)

                

        ############## Clean Up ##############
        training_runners[ 'coord' ].request_stop()
        training_runners[ 'coord' ].join()
        
        # if os.path.isfile(pickle_dir): 
        #     with open(pickle_dir, 'rb') as fp:
        #         all_outputs = pickle.load(fp)
                
        ############## Store to dict ##############
        
        print("Done: {}".format(task))
        # os.system("sudo cp {d} /home/ubuntu/s3/model_log".format(d=pickle_dir))

        ############## Reset graph and paths ##############            
        tf.reset_default_graph()
        training_runners['sess'].close()

    return
Example #19
def run_to_task(task_to):

    import general_utils
    from general_utils import RuntimeDeterminedEnviromentVars
    import models.architectures as architectures
    from data.load_ops import resize_rescale_image
    from data.load_ops import rescale_image
    import utils
    from data.task_data_loading import load_and_specify_preprocessors_for_representation_extraction
    from data.task_data_loading import load_and_specify_preprocessors_for_input_depends_on_target
    import lib.data.load_ops as load_ops
    tf.logging.set_verbosity(tf.logging.ERROR)

    args = parser.parse_args()

    cfg, is_transfer, task, config_name = generate_cfg(args.config, args.vid,
                                                       args)
    if task == 'class_places' or task == 'class_1000':
        synset = get_synset(task)
    if task == 'jigsaw':
        cfg['preprocess_fn'] = load_and_specify_preprocessors_for_input_depends_on_target

    print("Doing {task}".format(task=task))
    general_utils = importlib.reload(general_utils)
    tf.reset_default_graph()
    training_runners = {
        'sess': tf.InteractiveSession(),
        'coord': tf.train.Coordinator()
    }

    ############## Start dataloading workers ##############
    if is_transfer:
        get_data_prefetch_threads_init_fn = utils.get_data_prefetch_threads_init_fn_transfer
        setup_input_fn = utils.setup_input_transfer
    else:
        setup_input_fn = utils.setup_input
        get_data_prefetch_threads_init_fn = utils.get_data_prefetch_threads_init_fn

    ############## Set Up Inputs ##############
    # tf.logging.set_verbosity( tf.logging.INFO )
    inputs = setup_input_fn(cfg, is_training=False, use_filename_queue=False)
    RuntimeDeterminedEnviromentVars.load_dynamic_variables(inputs, cfg)
    RuntimeDeterminedEnviromentVars.populate_registered_variables()
    start_time = time.time()

    ############## Set Up Model ##############
    model = utils.setup_model(inputs, cfg, is_training=IN_TRAIN_MODE)
    m = model['model']
    model['saver_op'].restore(training_runners['sess'], cfg['model_path'])

    data_prefetch_init_fn = get_data_prefetch_threads_init_fn(
        inputs, cfg, is_training=False, use_filename_queue=False)
    prefetch_threads = threading.Thread(target=data_prefetch_init_fn,
                                        args=(training_runners['sess'],
                                              training_runners['coord']))

    prefetch_threads.start()
    list_of_fname = np.load(
        '/home/ubuntu/task-taxonomy-331b/assets/aws_data/video{}_fname.npy'.
        format(args.vid))
    import errno

    try:
        os.mkdir('/home/ubuntu/{}'.format(task))
        os.mkdir('/home/ubuntu/{}/vid1'.format(task))
        os.mkdir('/home/ubuntu/{}/vid2'.format(task))
        os.mkdir('/home/ubuntu/{}/vid3'.format(task))
        os.mkdir('/home/ubuntu/{}/vid4'.format(task))
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    curr_comp = np.zeros((3, 64))
    curr_fit_img = np.zeros((256, 256, 3))
    embeddings = []
    curr_vp = []
    curr_layout = []

    ############## Run First Batch ##############
    def rescale_l_for_display(batch, rescale=True):
        '''
        Prepares network output for display by optionally rescaling from [-1,1],
        and by setting some pixels to the min/max of 0/1. This prevents matplotlib
        from rescaling the images. 
        '''
        if rescale:
            display_batch = [
                rescale_image(im.copy(),
                              new_scale=[0, 100],
                              current_scale=[-1, 1]) for im in batch
            ]
        else:
            display_batch = batch.copy()
        for im in display_batch:
            im[0, 0,
               0] = 1.0  # Adjust some values so that matplotlib doesn't rescale
            im[0, 1, 0] = 0.0  # Now adjust the min
        return display_batch

    for step_num in range(inputs['max_steps'] - 1):
        #for step_num in range(20):
        #if step_num > 0 and step_num % 20 == 0:
        print(step_num)
        if is_transfer:
            (input_batch, target_batch, data_idx,
             predicted) = training_runners['sess'].run([
                 m.input_images, m.target_images, model['data_idxs'],
                 m.decoder.decoder_output
             ])
        else:
            (input_batch, target_batch, data_idx,
             predicted) = training_runners['sess'].run([
                 m.input_images, m.targets, model['data_idxs'],
                 m.decoder_output
             ])

        if task == 'segment2d' or task == 'segment25d':
            from sklearn.decomposition import PCA
            x = np.zeros((32, 256, 256, 3), dtype='float')
            k_embed = 8
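            # Same temporally-stable PCA visualization as above: fit on the last
            # k_embed embeddings and pick the component permutation whose projection
            # best matches the previous frame.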
            for i in range(predicted.shape[0]):
                embedding_flattened = np.squeeze(predicted[i]).reshape(
                    (-1, 64))
                embeddings.append(embedding_flattened)
                if len(embeddings) > k_embed:
                    embeddings.pop(0)
                pca = PCA(n_components=3)
                pca.fit(np.vstack(embeddings))
                min_order = None
                min_dist = float('inf')
                copy_of_comp = np.copy(pca.components_)
                for order in itertools.permutations([0, 1, 2]):
                    #reordered = pca.components_[list(order), :]
                    #dist = np.linalg.norm(curr_comp-reordered)
                    pca.components_ = copy_of_comp[order, :]
                    lower_dim = pca.transform(embedding_flattened).reshape(
                        (256, 256, -1))
                    lower_dim = (lower_dim - lower_dim.min()) / (
                        lower_dim.max() - lower_dim.min())
                    dist = np.linalg.norm(lower_dim - curr_fit_img)
                    if dist < min_dist:
                        min_order = order
                        min_dist = dist
                pca.components_ = copy_of_comp[min_order, :]
                lower_dim = pca.transform(embedding_flattened).reshape(
                    (256, 256, -1))
                lower_dim = (lower_dim - lower_dim.min()) / (lower_dim.max() -
                                                             lower_dim.min())
                curr_fit_img = np.copy(lower_dim)
                x[i] = lower_dim
            predicted = x
        if task == 'curvature':
            std = [31.922, 21.658]
            mean = [123.572, 120.1]
            predicted = (predicted * std) + mean
            predicted[:, 0, 0, :] = 0.
            predicted[:, 1, 0, :] = 1.
            predicted = np.squeeze(
                np.clip(predicted.astype(int) / 255., 0., 1.)[:, :, :, 0])

        if task == 'colorization':
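            # Colorization: softmax over the predicted color-bin logits, take the
            # expectation over the ab bin centers in pts_in_hull.npy, upsample 4x
            # back to 256x256, concatenate with the rescaled L channel, and convert
            # Lab -> RGB for display.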
            maxs = np.amax(predicted, axis=-1)
            softmax = np.exp(predicted - np.expand_dims(maxs, axis=-1))
            sums = np.sum(softmax, axis=-1)
            softmax = softmax / np.expand_dims(sums, -1)

            kernel = np.load(
                '/home/ubuntu/task-taxonomy-331b/lib/data/pts_in_hull.npy')
            gen_target_no_temp = np.dot(softmax, kernel)

            images_resized = np.zeros([0, 256, 256, 2], dtype=np.float32)
            for image in range(gen_target_no_temp.shape[0]):
                temp = scipy.ndimage.zoom(np.squeeze(
                    gen_target_no_temp[image]), (4, 4, 1),
                                          mode='nearest')
                images_resized = np.append(images_resized,
                                           np.expand_dims(temp, axis=0),
                                           axis=0)
            inp_rescale = rescale_l_for_display(input_batch)
            output_lab_no_temp = np.concatenate((inp_rescale, images_resized),
                                                axis=3).astype(np.float64)

            for i in range(input_batch.shape[0]):
                output_lab_no_temp[i, :, :, :] = skimage.color.lab2rgb(
                    output_lab_no_temp[i, :, :, :])
            predicted = output_lab_no_temp

        just_rescale = [
            'autoencoder', 'denoise', 'edge2d', 'edge3d', 'keypoint2d',
            'keypoint3d', 'reshade', 'rgb2sfnorm', 'impainting_whole'
        ]

        if task in just_rescale:
            predicted = (predicted + 1.) / 2.
            predicted = np.clip(predicted, 0., 1.)
            predicted[:, 0, 0, :] = 0.
            predicted[:, 1, 0, :] = 1.

        just_clip = ['rgb2depth', 'rgb2mist']
        if task in just_clip:
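            # Depth/mist predictions appear to be stored in a log-encoded space; undo
            # the encoding, re-normalize to roughly [0, 1] for display, and pin two
            # corner pixels so the image writer does not rescale.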
            predicted = np.exp(predicted * np.log(2.0**16.0)) - 1.0
            predicted = np.log(predicted) / 11.09
            predicted = (predicted - 0.64) / 0.18
            predicted = (predicted + 1.) / 2
            predicted[:, 0, 0, :] = 0.
            predicted[:, 1, 0, :] = 1.

        if task == 'segmentsemantic_rb':
            label = np.argmax(predicted, axis=-1)
            COLORS = ('white', 'red', 'blue', 'yellow', 'magenta', 'green',
                      'indigo', 'darkorange', 'cyan', 'pink', 'yellowgreen',
                      'black', 'darkgreen', 'brown', 'gray', 'purple',
                      'darkviolet')
            rgb = (input_batch + 1.) / 2.
            preds = [
                color.label2rgb(np.squeeze(x),
                                np.squeeze(y),
                                colors=COLORS,
                                kind='overlay')[np.newaxis, :, :, :]
                for x, y in zip(label, rgb)
            ]
            predicted = np.vstack(preds)

        if task in ['class_1000', 'class_places']:
            for file_idx, predict_output in zip(data_idx, predicted):
                to_store_name = list_of_fname[file_idx].decode(
                    'utf-8').replace('video', task)
                to_store_name = os.path.join('/home/ubuntu', to_store_name)
                sorted_pred = np.argsort(predict_output)[::-1]
                top_5_pred = [synset[sorted_pred[i]] for i in range(5)]
                to_print_pred = "Top 5 prediction: \n {}\n {}\n {}\n {} \n {}".format(
                    *top_5_pred)
                img = Image.new('RGBA', (400, 200), (255, 255, 255))
                d = ImageDraw.Draw(img)
                fnt = ImageFont.truetype(
                    '/usr/share/fonts/truetype/dejavu/DejaVuSerifCondensed.ttf',
                    25)
                d.text((20, 5), to_print_pred, fill=(255, 0, 0), font=fnt)
                img.save(to_store_name, 'PNG')
        elif task == 'vanishing_point_well_defined':
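            # Plot the predicted vanishing point, smoothed over a sliding window of
            # the last 5 frames (curr_vp) to reduce jitter in the rendered video.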
            counter = 0
            for file_idx, predict_output in zip(data_idx, predicted):
                to_store_name = list_of_fname[file_idx].decode(
                    'utf-8').replace('video', task)
                to_store_name = os.path.join('/home/ubuntu', to_store_name)
                curr_vp.append(
                    plot_vanishing_point_smoothed(
                        predict_output, (input_batch[counter] + 1.) / 2.,
                        to_store_name, curr_vp))
                if len(curr_vp) > 5:
                    curr_vp.pop(0)
                counter += 1
                #scipy.misc.toimage(result, cmin=0.0, cmax=1.0).save(to_store_name)
        elif task == 'room_layout':
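            # De-normalize the 9-D room-layout regression output with dataset
            # statistics, then render the layout cube over the input frame, again
            # smoothing with a short history (curr_layout) of recent predictions.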
            mean = np.array([
                0.006072743318127848, 0.010272365569691076, -3.135909774145468,
                1.5603802322235532, 5.6228218371102496e-05,
                -1.5669352793761442, 5.622875878174759, 4.082800262277375,
                2.7713941642895956
            ])
            std = np.array([
                0.8669452525283652, 0.687915294956501, 2.080513632043758,
                0.19627420479282623, 0.014680602791251812, 0.4183827359302299,
                3.991778013006544, 2.703495278378409, 1.2269185938626304
            ])
            predicted = predicted * std + mean
            counter = 0
            for file_idx, predict_output in zip(data_idx, predicted):
                to_store_name = list_of_fname[file_idx].decode(
                    'utf-8').replace('video', task)
                to_store_name = os.path.join('/home/ubuntu', to_store_name)
                plot_room_layout(predict_output,
                                 (input_batch[counter] + 1.) / 2.,
                                 to_store_name,
                                 curr_layout,
                                 cube_only=True)
                curr_layout.append(predict_output)
                if len(curr_layout) > 5:
                    curr_layout.pop(0)
                #scipy.misc.toimage(result, cmin=0.0, cmax=1.0).save(to_store_name)
                counter += 1
        elif task == 'segmentsemantic_rb':
            for file_idx, predict_output in zip(data_idx, predicted):
                to_store_name = list_of_fname[file_idx].decode(
                    'utf-8').replace('video', task)
                to_store_name = os.path.join('/home/ubuntu', to_store_name)
                process_semseg_frame(predict_output, to_store_name)
        elif task == 'jigsaw':
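            # Jigsaw predicts a permutation class; look up the corresponding tile
            # permutation in cfg['target_dict'] and render the shuffled input.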
            predicted = np.argmax(predicted, axis=1)
            counter = 0
            for file_idx, predict_output in zip(data_idx, predicted):
                to_store_name = list_of_fname[file_idx].decode(
                    'utf-8').replace('video', task)
                to_store_name = os.path.join('/home/ubuntu', to_store_name)
                perm = cfg['target_dict'][predict_output]
                show_jigsaw((input_batch[counter] + 1.) / 2., perm,
                            to_store_name)
                counter += 1
        else:
            for file_idx, predict_output in zip(data_idx, predicted):
                to_store_name = list_of_fname[file_idx].decode(
                    'utf-8').replace('video', task)
                to_store_name = os.path.join('/home/ubuntu', to_store_name)
                scipy.misc.toimage(np.squeeze(predict_output),
                                   cmin=0.0,
                                   cmax=1.0).save(to_store_name)

    # subprocess.call('tar -czvf /home/ubuntu/{c}_{vid_id}.tar.gz /home/ubuntu/{t}/vid{vid_id}'.format(
    # c=config_name, t=task, vid_id=args.vid), shell=True)
    # subprocess.call('ffmpeg -r 29.97 -f image2 -s 256x256 -i /home/ubuntu/{t}/vid{vid_id}/0{vid_id}0%04d.png -vcodec libx264 -crf 15  {c}_{vid_id}.mp4'.format(
    # c=config_name, t=task, vid_id=args.vid), shell=True)
    subprocess.call(
        'ffmpeg -r 29.97 -f image2 -s 256x256 -i /home/ubuntu/{t}/vid{vid_id}/0{vid_id}0%04d.png -ss 00:01:54 -t 00:00:40 -c:v libvpx-vp9 -crf 10 -b:v 128k {c}_{vid_id}.webm'
        .format(c=config_name, t=task, vid_id=args.vid),
        shell=True)
    # subprocess.call('ffmpeg -r 29.97 -f image2 -s 256x256 -i /home/ubuntu/{t}/vid{vid_id}/0{vid_id}0%04d.png -vcodec libx264 -crf 15  -pix_fmt yuv420p {c}_{vid_id}.mp4'.format(
    # c=config_name, t=task, vid_id=args.vid), shell=True)
    subprocess.call(
        'sudo mkdir -p /home/ubuntu/s3/video_new/{t}'.format(t=task),
        shell=True)
    #subprocess.call('sudo mkdir -p /home/ubuntu/s3/video_new_all/{t}'.format(t=task), shell=True)
    #     subprocess.call('aws s3 cp /home/ubuntu/{c}_{vid_id}.tar.gz s3://task-preprocessing-512-oregon/video_new_all/{t}/'.format(
    # c=config_name, t=task, vid_id=args.vid), shell=True)
    subprocess.call(
        'aws s3 cp {c}_{vid_id}.webm s3://task-preprocessing-512-oregon/video_new/{t}/'
        .format(c=config_name, t=task, vid_id=args.vid),
        shell=True)

    # subprocess.call('aws s3 cp /home/ubuntu/{c}_{vid_id}.tar.gz s3://taskonomy-unpacked-oregon/video_tar_all/{t}/'.format(
    # c=config_name, t=task, vid_id=args.vid), shell=True)
    # subprocess.call('aws s3 cp {c}_{vid_id}.mp4 s3://taskonomy-unpacked-oregon/video_all/{t}/'.format(
    #     c=config_name, t=task, vid_id=args.vid), shell=True)

    ############## Clean Up ##############
    training_runners['coord'].request_stop()
    training_runners['coord'].join()
    print("Done: {}".format(config_name))

    ############## Reset graph and paths ##############
    tf.reset_default_graph()
    training_runners['sess'].close()

    return
def run_extract_representations(args, cfg, save_dir, given_task):
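    # Stream every example through the input pipeline, flatten the raw input batch
    # into per-example 'pixel' representations, and pickle them together with their
    # file indexes (optionally copying the result to S3-backed storage).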
    transfer = (cfg['model_type'] == architectures.TransferNet)
    if transfer:
        get_data_prefetch_threads_init_fn = utils.get_data_prefetch_threads_init_fn_transfer
        setup_input_fn = utils.setup_input_transfer
    else:
        setup_input_fn = utils.setup_input
        get_data_prefetch_threads_init_fn = utils.get_data_prefetch_threads_init_fn

    # set up logging
    tf.logging.set_verbosity(tf.logging.INFO)

    with tf.Graph().as_default() as g:
        # create ops and placeholders
        tf.logging.set_verbosity(tf.logging.INFO)
        inputs = setup_input_fn(cfg,
                                is_training=False,
                                use_filename_queue=False)
        RuntimeDeterminedEnviromentVars.load_dynamic_variables(inputs, cfg)
        RuntimeDeterminedEnviromentVars.populate_registered_variables()

        # build model (and losses and train_op)

        # set up metrics to evaluate
        # names_to_values, names_to_updates = setup_metrics( inputs, model, cfg )

        # execute training
        start_time = time.time()
        utils.print_start_info(cfg, inputs['max_steps'], is_training=False)

        # start session and restore model
        training_runners = {
            'sess': tf.Session(),
            'coord': tf.train.Coordinator()
        }
        try:
            if cfg['model_path'] is None:
                print('Please specify a checkpoint directory')
                return

            utils.print_start_info(cfg, inputs['max_steps'], is_training=False)
            data_prefetch_init_fn = get_data_prefetch_threads_init_fn(
                inputs, cfg, is_training=False, use_filename_queue=False)
            prefetch_threads = threading.Thread(
                target=data_prefetch_init_fn,
                args=(training_runners['sess'], training_runners['coord']))
            prefetch_threads.start()

            # run one example so that we can calculate some statistics about the representations
            filenames = []
            representations, data_idx = training_runners['sess'].run(
                [inputs['input_batch'], inputs['data_idxs']])

            filenames.extend(data_idx)
            if type(representations) == list:
                representations = representations[0]
            representations = representations.reshape(
                (-1, np.prod(cfg['input_dim'])))
            print('Got first batch representation with size: {0}'.format(
                representations.shape))

            # run the remaining examples
            for step in range(inputs['max_steps'] - 1):
                #for step in range( 10 ):
                if step % 100 == 0:
                    print('Step {0} of {1}'.format(step,
                                                   inputs['max_steps'] - 1))

                # This is just for GAN, for the LEO meeting
                encoder_output, data_idx = training_runners['sess'].run(
                    [inputs['input_batch'], inputs['data_idxs']])
                if type(encoder_output) == list:
                    encoder_output = encoder_output[0]
                representations = np.append(representations,
                                            encoder_output.reshape(
                                                (-1,
                                                 np.prod(cfg['input_dim']))),
                                            axis=0)
                filenames.extend(data_idx)

                if training_runners['coord'].should_stop():
                    break

            print(
                'Representations have shape %s; expected %d steps with batch size %d'
                % (representations.shape, inputs['max_steps'],
                   cfg['batch_size']))

            end_train_time = time.time() - start_time
            save_path = os.path.join(
                save_dir, '{task}_{split}_representations.pkl'.format(
                    task='pixels', split=args.data_split))

            with open(save_path, 'wb') as f:
                pickle.dump(
                    {
                        'file_indexes': filenames,
                        'representations': representations
                    }, f)

            copy_to = None
            if args.out_dir:
                os.system("sudo cp {fp} {out}/".format(fp=save_path,
                                                       out=args.out_dir))
                copy_to = args.out_dir
            else:
                if transfer:
                    os.system(
                        "sudo cp {fp} /home/ubuntu/s3/model_log/representations_transfer/"
                        .format(fp=save_path))
                    copy_to = '/home/ubuntu/s3/model_log/representations_transfer/'
                else:
                    os.system(
                        "sudo cp {fp} /home/ubuntu/s3/model_log/representations/"
                        .format(fp=save_path))
                    copy_to = "/home/ubuntu/s3/model_log/representations/"

            print('saved representations to {0}'.format(save_path))
            print('copied representations to {0}'.format(copy_to))
            print('time to extract %d epochs: %.3f hrs' %
                  (cfg['num_epochs'], end_train_time / (60 * 60)))
        finally:
            utils.request_data_loading_end(training_runners)
            utils.end_data_loading_and_sess(training_runners)
def run_to_task(task_to):

    import general_utils
    from general_utils import RuntimeDeterminedEnviromentVars
    import models.architectures as architectures
    from data.load_ops import resize_rescale_image
    import utils
    from data.task_data_loading import load_and_specify_preprocessors_for_representation_extraction
    import lib.data.load_ops as load_ops
    tf.logging.set_verbosity(tf.logging.ERROR)

    all_outputs = {}
    pickle_dir = 'viz_output_single_task.pkl'
    import os
    if os.path.isfile(pickle_dir):
        with open(pickle_dir, 'rb') as fp:
            all_outputs = pickle.load(fp)

    for task in list_of_tasks:
        if task in all_outputs:
            print("{} already exists....\n\n\n".format(task))
            continue
        print("Doing {task}".format(task=task))
        general_utils = importlib.reload(general_utils)
        tf.reset_default_graph()
        training_runners = {
            'sess': tf.InteractiveSession(),
            'coord': tf.train.Coordinator()
        }

        # task = '{f}__{t}__{hs}'.format(f=task_from, t=task_to, hs=args.hs)
        CONFIG_DIR = '/home/ubuntu/task-taxonomy-331b/experiments/final/{TASK}'.format(
            TASK=task)

        ############## Load Configs ##############
        cfg = utils.load_config(CONFIG_DIR, nopause=True)
        RuntimeDeterminedEnviromentVars.register_dict(cfg)
        split_file = cfg['test_filenames'] if ON_TEST_SET else cfg[
            'val_filenames']
        cfg['train_filenames'] = split_file
        cfg['val_filenames'] = split_file
        cfg['test_filenames'] = split_file
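        # Point every split at the chosen file list so the loader only reads
        # the selected (test or val) split during this pass.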

        cfg['num_epochs'] = 1
        cfg['randomize'] = False
        root_dir = cfg['root_dir']
        cfg['num_read_threads'] = 1
        print(cfg['log_root'])
        if task == 'jigsaw':
            continue
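        # Build the path to this task's permanent checkpoint; it is restored
        # into the model further below.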
        cfg['model_path'] = os.path.join(cfg['log_root'], task,
                                         'model.permanent-ckpt')

        print(cfg['model_path'])
        if cfg['model_path'] is None:
            continue

        ############## Set Up Inputs ##############
        # tf.logging.set_verbosity( tf.logging.INFO )
        inputs = utils.setup_input(
            cfg, is_training=ON_TEST_SET, use_filename_queue=False
        )  # is_training determines whether to use train/validation
        RuntimeDeterminedEnviromentVars.load_dynamic_variables(inputs, cfg)
        RuntimeDeterminedEnviromentVars.populate_registered_variables()
        start_time = time.time()
        # utils.print_start_info( cfg, inputs[ 'max_steps' ], is_training=False )

        ############## Set Up Model ##############
        model = utils.setup_model(inputs, cfg, is_training=IN_TRAIN_MODE)
        m = model['model']
        model['saver_op'].restore(training_runners['sess'], cfg['model_path'])

        ############## Start dataloading workers ##############
        data_prefetch_init_fn = utils.get_data_prefetch_threads_init_fn(
            inputs, cfg, is_training=ON_TEST_SET, use_filename_queue=False)

        prefetch_threads = threading.Thread(target=data_prefetch_init_fn,
                                            args=(training_runners['sess'],
                                                  training_runners['coord']))
        prefetch_threads.start()
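        # The prefetch thread now fills the input queues in the background;
        # the session.run calls below consume batches from those queues.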

        ############## Run First Batch ##############
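        # Models that expose a mask tensor return it with the batch; models
        # without one get a scalar mask of 1. so downstream code can treat
        # both cases uniformly.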
        if not hasattr(m, 'masks'):
            (
                input_batch,
                target_batch,
                data_idx,
                predicted,
                loss,
            ) = training_runners['sess'].run([
                m.input_images, m.targets, model['data_idxs'],
                m.decoder_output, m.total_loss
            ])
            mask_batch = 1.
        else:
            (
                input_batch,
                target_batch,
                mask_batch,
                data_idx,
                predicted,
                loss,
            ) = training_runners['sess'].run([
                m.input_images, m.targets, m.masks, model['data_idxs'],
                m.decoder_output, m.total_loss
            ])

        if task == 'segment2d' or task == 'segment25d':
            from sklearn.decomposition import PCA

            # The segmentation embeddings are 64-d per pixel; project each
            # image's embedding to 3 dimensions with PCA and min-max normalize
            # so it can be visualized as an RGB image.
            x = np.zeros((predicted.shape[0], 256, 256, 3), dtype='float')
            for i in range(predicted.shape[0]):
                embedding_flattened = np.squeeze(predicted[i]).reshape(
                    (-1, 64))
                pca = PCA(n_components=3)
                pca.fit(embedding_flattened)
                lower_dim = pca.transform(embedding_flattened).reshape(
                    (256, 256, -1))
                lower_dim = (lower_dim - lower_dim.min()) / (lower_dim.max() -
                                                             lower_dim.min())
                x[i] = lower_dim
            predicted = x

        ############## Clean Up ##############
        training_runners['coord'].request_stop()
        training_runners['coord'].join()

        # if os.path.isfile(pickle_dir):
        #     with open(pickle_dir, 'rb') as fp:
        #         all_outputs = pickle.load(fp)

        ############## Store to dict ##############
        to_store = {
            'input': input_batch,
            'target': target_batch,
            'mask': mask_batch,
            'data_idx': data_idx,
            'output': predicted
        }
        all_outputs[task] = to_store

        print("Done: {}".format(task))
        # os.system("sudo cp {d} /home/ubuntu/s3/model_log".format(d=pickle_dir))

        ############## Reset graph and paths ##############
        tf.reset_default_graph()
        training_runners['sess'].close()
        try:
            del sys.modules['config']
        except KeyError:
            pass
        sys.path = remove_dups(sys.path)
        print("FINISHED: {}\n\n\n\n\n\n".format(task))
        pickle_dir = 'viz_output_single_task.pkl'
        with open(pickle_dir, 'wb') as fp:
            pickle.dump(all_outputs, fp)
        # subprocess.call() returns an exit code instead of raising on failure,
        # so check it explicitly and fall back to the mounted S3 path.
        ret = subprocess.call(
            "aws s3 cp {} s3://task-preprocessing-512-oregon/visualizations/"
            .format(pickle_dir),
            shell=True)
        if ret != 0:
            subprocess.call(
                "sudo cp {} /home/ubuntu/s3/visualizations/".format(
                    pickle_dir),
                shell=True)

    return