Code example #1
0
def save_img(img, save_path):
    """Convert a tensor image to a numpy array, write it to disk, and return it.

    Args:
        img: image tensor accepted by util.tensor2im.
        save_path: destination file path; missing directories are created.

    Returns:
        The converted numpy image that was written.
    """
    numpy_image = util.tensor2im(img)
    util.save_image(numpy_image, save_path, create_dir=True)
    return numpy_image


if __name__ == '__main__':


    # Parse test-time command-line options (project-specific TestOptions).
    opt = TestOptions().parse()

    # Resolve which dataset the options refer to and its folder nesting
    # depth; presumably used later for path construction -- TODO confirm.
    data_info = data.dataset_info()
    datanum = data_info.get_dataset(opt)[0]
    folderlevel = data_info.folder_level[datanum]

    dataloaders = data.create_dataloader_test(opt)

    visualizer = Visualizer(opt)
    # Iteration total covers the first dataloader replicated across all
    # render threads.
    iter_counter = IterationCounter(opt, len(dataloaders[0]) * opt.render_thread)
    # create a webpage that summarizes all the results

    # Bounded queue (capacity 10) feeding work to the render layers.
    testing_queue = Queue(10)

    ngpus = opt.device_count

    # The last opt.render_thread GPUs are reserved for rendering.
    render_gpu_ids = list(range(ngpus - opt.render_thread, ngpus))
    render_layer_list = []
    for gpu in render_gpu_ids:
        # NOTE(review): opt.gpu_ids is overwritten with a single int each
        # iteration, so every TestRender sees only its own GPU id --
        # confirm TestRender expects an int rather than a list here.
        opt.gpu_ids = gpu
        render_layer = TestRender(opt)
        render_layer_list.append(render_layer)
Code example #2
0
        NB_STACKS, SIGMA))
    sys.stdout.flush()

    # Define and build the model.
    # reuse=False creates the "model" scope variables; the later scopes with
    # reuse=True attach extra graph heads that share those same weights.
    with tf.variable_scope("model", reuse=False):
        model_train = StackedHourglass(nb_stacks=NB_STACKS, sigma=SIGMA)
        # Second positional argument toggles training mode (True here,
        # False for the valid/test graphs below).
        all_heatmaps_pred_train, p2d_pred_train = model_train(im_train, True)

#    sys.exit(0)

    with tf.variable_scope("model", reuse=True):
        model_valid = StackedHourglass(nb_stacks=NB_STACKS, sigma=SIGMA)
        all_heatmaps_pred_valid, p2d_pred_valid = model_valid(im_valid, False)

    # test data
    im_test = create_dataloader_test(
        data_root=DATA_PATH)  # load test data with batch_size=1
    with tf.variable_scope("model", reuse=True):
        model_test = StackedHourglass(nb_stacks=NB_STACKS, sigma=SIGMA)
        all_heatmaps_pred_test, p2d_pred_test = model_test(im_test, False)

#    sys.exit(0)

# compute loss
    print("Loss...")
    sys.stdout.flush()
    # Loss is computed from ground-truth 2D poses and the predicted
    # heatmaps; exact formulation lives in StackedHourglass.compute_loss.
    loss_train = model_train.compute_loss(p2d_gt_train,
                                          all_heatmaps_pred_train)
    loss_valid = model_valid.compute_loss(p2d_gt_valid,
                                          all_heatmaps_pred_valid)
    #    sys.exit(0)
Code example #3
0
def _inference(args):
    """Restore a trained NeuCF-family model and write prediction CSVs.

    Builds the test and valid-output graph heads over one shared set of
    weights, restores the checkpoint at ``args.model_path``, then writes:
      * ``args.output_valid_path`` -- predictions for the validation split
      * ``args.test_path``         -- predictions for the test set

    Reads from args: model, model_path, batch_size, valid_ratio,
    external_embedding, output_valid_path, test_path.

    Raises:
        ValueError: if ``args.model`` is not a known NeuCF variant.
    """
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.visible_device_list = "0"
    with tf.Session(config=config) as sess:
        # The train loader is created only for max_row/max_col and the
        # valid-output iterator; the train/valid streams are unused here.
        (dataloader_train, dataloader_valid, max_row, max_col,
         iterator_output, rcstrs_output,
         iterator_output_valid, rcstrs_output_valid) = create_dataloader_train(
            valid_ratio=args.valid_ratio, batch_size=args.batch_size)
        iterator_test, row_col_prediction, rcstrs = create_dataloader_test(
            batch_size=args.batch_size)
        sess.run([iterator_test.initializer,
                  iterator_output_valid.initializer])
        row_col_test, label_test = iterator_test.get_next()
        row_col_output_valid, label_output_valid = iterator_output_valid.get_next()

        # Dispatch table replaces four near-identical if/elif branches that
        # differed only in the model class.
        model_classes = {"NeuCF": NeuCF, "NeuCF2": NeuCF2,
                         "NeuCF3": NeuCF3, "NeuCF4": NeuCF4}
        try:
            model_cls = model_classes[args.model]
        except KeyError:
            # The original code silently fell through and crashed later with
            # a NameError; fail fast with a clear message instead.
            raise ValueError("unknown model: %r" % args.model)

        # reuse=False creates the weights; reuse=True shares them.
        with tf.variable_scope("model", reuse=False):
            model_test = model_cls(row_col_test, label_test, max_row, max_col, args)
            sess.run(tf.global_variables_initializer())
        with tf.variable_scope("model", reuse=True):
            model_output_valid = model_cls(row_col_output_valid, label_output_valid,
                                           max_row, max_col, args)
            sess.run(tf.global_variables_initializer())

        # The original NeuCF branch did not load external embeddings;
        # preserve that exception.
        if args.external_embedding and args.model != "NeuCF":
            model_test.init_embedding(sess, args)

        # Restore only the model's own variables (renamed from `vars`,
        # which shadowed the builtin).
        model_vars = [v for v in tf.global_variables()
                      if v.name.startswith("model/NeuCF")]
        saver = tf.train.Saver(model_vars)
        saver.restore(sess, args.model_path)

        def _predict_all(model, num_rows):
            """Run model.step over ceil(num_rows / batch_size) batches and
            return the concatenated predictions flattened to 1-D."""
            chunks = []
            for _ in range(math.ceil(num_rows / args.batch_size)):
                chunks.append(model.step(sess, isTesting=True, dropout_keep_prob=1))
            merged = np.concatenate(chunks, axis=0)
            return np.reshape(merged, (merged.shape[0],))

        output_valid_prediction = _predict_all(model_output_valid,
                                               len(rcstrs_output_valid))
        df = pd.DataFrame({'Id': rcstrs_output_valid,
                           'Prediction': output_valid_prediction})
        df.to_csv(args.output_valid_path, index=False)

        test_prediction = _predict_all(model_test, row_col_prediction.shape[0])
        # data frame is reconstructed since the direct modification is too slow
        df = pd.DataFrame({'Id': rcstrs, 'Prediction': test_prediction})
        df.to_csv(args.test_path, index=False)
Code example #4
0
def _train(args):
    """Train the selected NeuCF-family model with early stopping.

    Builds five graph heads over one shared set of weights (train, valid,
    test, output-on-train-split, output-on-valid-split).  Each epoch runs
    args.epoch_iter training batches and args.valid_iter validation batches;
    whenever validation RMSE reaches a new best (and args.save_flag is set)
    the weights are checkpointed and full prediction CSVs are written.

    Reads from args: model, epochs, epoch_iter, valid_iter, batch_size,
    valid_ratio, flag_step, save_flag, external_embedding, log_path.

    Side effects: creates args.log_path; writes log.txt, TensorBoard events,
    checkpoints, and output/submission CSVs (some copied to ../cache, ../test).
    """
    os.makedirs(os.path.join(args.log_path), exist_ok=True)
    # Line-buffered text log.  NOTE(review): never explicitly closed in this
    # function -- relies on process exit; consider a `with` block.
    logfile = open(os.path.join(args.log_path, 'log.txt'), 'w', buffering=1)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.visible_device_list = "0"
    with tf.Session(config=config) as sess:
        # Train/valid come back as ready dataloaders; the test and two
        # "output" feeds are initializable iterators that must be
        # re-initialized before every full pass (see end of epoch body).
        dataloader_train, dataloader_valid, max_row, max_col, iterator_output, rcstrs_output, iterator_output_valid, rcstrs_output_valid = create_dataloader_train(valid_ratio=args.valid_ratio, batch_size=args.batch_size)
        #dataloader_test, row_col_prediction, rcstrs = create_dataloader_test(batch_size=args.batch_size)
        iterator_test, row_col_prediction, rcstrs = create_dataloader_test(batch_size=args.batch_size)
        sess.run([iterator_test.initializer,
                  iterator_output.initializer,
                  iterator_output_valid.initializer])
        dataloader_test = iterator_test.get_next()
        dataloader_output = iterator_output.get_next()
        dataloader_output_valid = iterator_output_valid.get_next()
        row_col_train, label_train = dataloader_train
        row_col_valid, label_valid = dataloader_valid
        row_col_output, label_output = dataloader_output
        row_col_output_valid, label_output_valid = dataloader_output_valid
        row_col_test, label_test = dataloader_test
        # The four branches below are identical except for the model class.
        # The first scope (reuse=False) creates the weights; each reuse=True
        # scope builds another graph head over the same weights.
        # NOTE(review): an unrecognized args.model falls through and crashes
        # later with a NameError on model_train.
        if args.model == "NeuCF2":
            with tf.variable_scope("model", reuse=False):
                model_train = NeuCF2(row_col_train, label_train, max_row, max_col, args)
                sess.run(tf.global_variables_initializer())

            with tf.variable_scope("model", reuse=True):
                model_valid = NeuCF2(row_col_valid, label_valid, max_row, max_col, args)
                sess.run(tf.global_variables_initializer())

            with tf.variable_scope("model", reuse=True):
                model_test = NeuCF2(row_col_test, label_test, max_row, max_col, args)
                sess.run(tf.global_variables_initializer())
            with tf.variable_scope("model", reuse=True):
                model_output = NeuCF2(row_col_output, label_output, max_row, max_col, args)
                sess.run(tf.global_variables_initializer())
            with tf.variable_scope("model", reuse=True):
                model_output_valid = NeuCF2(row_col_output_valid, label_output_valid, max_row, max_col, args)
                sess.run(tf.global_variables_initializer())

            # Embeddings are loaded through the training head only; the
            # other heads share the same variables.
            if args.external_embedding:
                model_train.init_embedding(sess, args)
                #model_valid.init_embedding(sess, args)
                #model_test.init_embedding(sess, args)
        elif args.model == "NeuCF3":
            with tf.variable_scope("model", reuse=False):
                model_train = NeuCF3(row_col_train, label_train, max_row, max_col, args)
                sess.run(tf.global_variables_initializer())

            with tf.variable_scope("model", reuse=True):
                model_valid = NeuCF3(row_col_valid, label_valid, max_row, max_col, args)
                sess.run(tf.global_variables_initializer())

            with tf.variable_scope("model", reuse=True):
                model_test = NeuCF3(row_col_test, label_test, max_row, max_col, args)
                sess.run(tf.global_variables_initializer())

            with tf.variable_scope("model", reuse=True):
                model_output = NeuCF3(row_col_output, label_output, max_row, max_col, args)
                sess.run(tf.global_variables_initializer())
            with tf.variable_scope("model", reuse=True):
                model_output_valid = NeuCF3(row_col_output_valid, label_output_valid, max_row, max_col, args)
                sess.run(tf.global_variables_initializer())

            if args.external_embedding:
                model_train.init_embedding(sess, args)
        elif args.model == "NeuCF4":
            with tf.variable_scope("model", reuse=False):
                model_train = NeuCF4(row_col_train, label_train, max_row, max_col, args)
                sess.run(tf.global_variables_initializer())

            with tf.variable_scope("model", reuse=True):
                model_valid = NeuCF4(row_col_valid, label_valid, max_row, max_col, args)
                sess.run(tf.global_variables_initializer())

            with tf.variable_scope("model", reuse=True):
                model_test = NeuCF4(row_col_test, label_test, max_row, max_col, args)
                sess.run(tf.global_variables_initializer())

            with tf.variable_scope("model", reuse=True):
                model_output = NeuCF4(row_col_output, label_output, max_row, max_col, args)
                sess.run(tf.global_variables_initializer())
            with tf.variable_scope("model", reuse=True):
                model_output_valid = NeuCF4(row_col_output_valid, label_output_valid, max_row, max_col, args)
                sess.run(tf.global_variables_initializer())
            if args.external_embedding:
                model_train.init_embedding(sess, args)
        elif args.model == "NeuCF":
            with tf.variable_scope("model", reuse=False):
                model_train = NeuCF(row_col_train, label_train, max_row, max_col, args)
                sess.run(tf.global_variables_initializer())

            with tf.variable_scope("model", reuse=True):
                model_valid = NeuCF(row_col_valid, label_valid, max_row, max_col, args)
                sess.run(tf.global_variables_initializer())

            with tf.variable_scope("model", reuse=True):
                model_test = NeuCF(row_col_test, label_test, max_row, max_col, args)
                sess.run(tf.global_variables_initializer())

            with tf.variable_scope("model", reuse=True):
                model_output = NeuCF(row_col_output, label_output, max_row, max_col, args)
                sess.run(tf.global_variables_initializer())
            with tf.variable_scope("model", reuse=True):
                model_output_valid = NeuCF(row_col_output_valid, label_output_valid, max_row, max_col, args)
                sess.run(tf.global_variables_initializer())
        summary_writer = tf.summary.FileWriter(args.log_path, sess.graph)

        # Save only the model's own variables (skips optimizer slots etc.).
        # NOTE(review): `vars` shadows the builtin of the same name.
        vars = [v for v in tf.global_variables() if v.name.startswith("model/NeuCF")]
        saver = tf.train.Saver(vars, max_to_keep=200)
        should_stop = False
        stopping_step = 0
        # Best validation RMSE so far; 1.0 is the pessimistic starting bound.
        cur_best_pre_0 = 1.0
        for i in range(args.epochs):
            epoch_loss = 0
            for j in range(args.epoch_iter):
                batch_loss = model_train.step(sess, isTraining=True, dropout_keep_prob=0.5)
                epoch_loss += batch_loss / args.epoch_iter
            valid_loss = 0
            valid_sse = 0
            for j in range(args.valid_iter):
                batch_loss, batch_sse = model_valid.step(sess, isValidating=True, dropout_keep_prob=1)
                valid_loss += batch_loss / args.valid_iter
                valid_sse += batch_sse
            # RMSE over exactly valid_iter * batch_size validation entries.
            valid_sse = valid_sse / (args.valid_iter * args.batch_size)
            valid_rmse = np.sqrt(valid_sse)
            print( '--Avg. Train Loss ='+str(epoch_loss)[:6] + '    --Avg. Valid Loss ='+str(valid_loss)[:6]+ '    --Valid RMSE = '+str(valid_rmse)[:6]+'\n' )
            logfile.write( '--Avg. Train Loss ='+str(epoch_loss)[:6] + '    --Avg. Valid Loss ='+str(valid_loss)[:6]+ '    --Valid RMSE = '+str(valid_rmse)[:6]+'\n' )
            logfile.flush()
            # early_stopping tracks the best RMSE ('dec' = lower is better)
            # and sets should_stop after args.flag_step epochs w/o improvement.
            cur_best_pre_0, stopping_step, should_stop = early_stopping(valid_rmse, cur_best_pre_0,
                                                                        stopping_step, expected_order='dec',
                                                                        flag_step=args.flag_step)
            if should_stop == True:
                break
            #saver.save(sess, os.path.join(args.log_path,'model'), global_step=i, write_meta_graph=False)
            # Float equality detects "this epoch set a new best"; presumably
            # early_stopping assigns valid_rmse verbatim on improvement --
            # TODO confirm against its implementation.
            if valid_rmse == cur_best_pre_0 and args.save_flag:
                saver.save(sess, os.path.join(args.log_path,'model'), global_step=i+1, write_meta_graph=False)
                # Predictions over the training-split "output" feed.
                output_prediction = None
                for j in range( math.ceil( len(rcstrs_output) / args.batch_size) ):
                    predict = model_output.step(sess, isTesting=True, dropout_keep_prob=1)
                    if j == 0:
                        output_prediction = predict
                    else:
                        output_prediction = np.concatenate([output_prediction, predict], axis=0)
                output_prediction = np.reshape(output_prediction, (output_prediction.shape[0],))
                df = pd.DataFrame( {'Id': rcstrs_output,'Prediction': output_prediction} )
                df.to_csv(os.path.join(args.log_path, 'output' + str(i+1)+".csv" ),index=False)
                #copyfile( os.path.join(args.log_path, 'output' + str(i+1)+".csv" ) , "../" )
                # Predictions over the validation-split "output" feed.
                output_valid_prediction = None
                for j in range( math.ceil( len(rcstrs_output_valid) / args.batch_size) ):
                    predict = model_output_valid.step(sess, isTesting=True, dropout_keep_prob=1)
                    if j == 0:
                        output_valid_prediction = predict
                    else:
                        output_valid_prediction = np.concatenate( [output_valid_prediction, predict] , axis=0 )
                output_valid_prediction = np.reshape(output_valid_prediction, (output_valid_prediction.shape[0],))
                df = pd.DataFrame( {'Id': rcstrs_output_valid,'Prediction': output_valid_prediction} )
                df.to_csv(os.path.join(args.log_path, 'output_valid' + str(i+1)+".csv" ),index=False)
                # NOTE(review): shutil.copyfile requires the destination to
                # be a file path, not a directory -- verify this destination
                # resolves to a file, otherwise this raises at runtime.
                copyfile( os.path.join(args.log_path, 'output_valid' + str(i+1)+".csv" ) ,
                                       os.path.join("../cache", args.log_path.split("/")[-2] ) )
                # Predictions for the unseen test set (submission file).
                test_prediction = None
                for j in range(math.ceil(row_col_prediction.shape[0] / args.batch_size)):
                    predict = model_test.step(sess, isTesting=True, dropout_keep_prob=1)
                    if j == 0:
                        test_prediction = predict
                    else:
                        test_prediction = np.concatenate([test_prediction, predict], axis=0)
                test_prediction = np.reshape(test_prediction, (test_prediction.shape[0],))

                # data frame is reconstructed since the direct modification is too slow
                df = pd.DataFrame({'Id': rcstrs,'Prediction': test_prediction})
                df.to_csv(os.path.join(args.log_path, 'submission' + str(i+1)+".csv" ),index=False)
                copyfile( os.path.join(args.log_path, 'submission' + str(i+1)+".csv" ) ,
                                       os.path.join("../test", args.log_path.split("/")[-2] ) )
                # The iterators above were fully consumed; re-initialize them
                # so the next best-epoch pass can iterate from the start.
                sess.run([iterator_test.initializer,
                        iterator_output.initializer,
                        iterator_output_valid.initializer])
                dataloader_test = iterator_test.get_next()
                dataloader_output = iterator_output.get_next()
                dataloader_output_valid = iterator_output_valid.get_next()
                row_col_test, label_test = dataloader_test
                row_col_output, label_output = dataloader_output
                row_col_output_valid, label_output_valid = dataloader_output_valid
Code example #5
0
BATCH_SIZE = 1 # predict one by one; drop_remainder=True then yields batches with a known batch dimension (needed by tf.unstack() in the HG model)

# Paths
list_of_files = glob.glob(os.path.join(CURR_DIR, "log_HG2D", "*"))
LOG_PATH = max(list_of_files, key=os.path.getctime) # most recently created dir = latest experiment
CHECKPOINTS_PATH  =os.path.join(LOG_PATH, "checkpoints") # restore the latest saved model from the latest experiment
CLUSTER_PATH = "/cluster/project/infk/hilliges/lectures/mp19/project2/"
LOCAL_PATH = "."
# Prefer the cluster data directory when it exists; fall back to local.
if os.path.exists(CLUSTER_PATH):
    DATA_PATH = CLUSTER_PATH
else:
    DATA_PATH = LOCAL_PATH

with tf.Session() as sess:
    # load test images (one per step -- see BATCH_SIZE above)
    im = create_dataloader_test(data_root=DATA_PATH)

    # define model
    model = StackedHourglass(nb_stacks=NB_STACKS, sigma=SIGMA)

    # build the model in inference mode
    all_heatmaps_pred, p2d_pred = model(im, training=False)

    # restore weights from the newest checkpoint in CHECKPOINTS_PATH
    saver = tf.train.Saver()
    saver.restore(sess,tf.train.latest_checkpoint(CHECKPOINTS_PATH))

    predictions = None
    # trange: tqdm progress bar over all test samples.
    # NOTE(review): math.ceil is a no-op if NUM_SAMPLES is already an int.
    with trange(math.ceil(NUM_SAMPLES)) as t: # generate predictions for all images
        for i in t:
            # Fetch image and prediction in one sess.run so both come from
            # the same input-pipeline step and stay in sync.
            image, p2d_out_value = sess.run([im, p2d_pred])