Example #1
def setUp(self):
    import json
    testcase_config_filename = '/tmp/testcase_config.json'
    testcase_config = {
        "data_path": "utils/testing.txt",
        "data_path_samples": "/tmp/in_dir/images",
        "data_path_gt": "/tmp/in_dir/maps",
        "data_path_pred": "/tmp/in_dir/pred_maps",
        "is_training": "False",
        "validation_percentage": "10",
        "testing_percentage": "10",
        "weights_file": "/tmp/vgg16_weights.npz",
        "dataset_name": "crowd_maps",
        "work_dir": "/tmp/work_dir/",
        "exp_name": "crowdnetreg_debug",
        "checkpoint_to_restore": "no_restore",
        "num_epochs": 20,
        "learning_rate": 0.001,
        "batch_size": 10,
        "max_to_keep": 5
    }
    with open(testcase_config_filename, 'w') as f:
        json.dump(testcase_config, f)
    testcase_config_reloaded = config.process_config(
        testcase_config_filename)
    self.dsetsloader = datahandler.FileListDatasetLoader(
        testcase_config_reloaded)
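All of these examples call a project-specific process_config helper, so its exact behavior differs from project to project. As a rough illustration of the pattern in Example #1 (a JSON file loaded into an attribute-accessible config object), here is a minimal sketch; it is not the implementation used by any of the projects shown, and the use of Bunch here is an assumption.

import json

from bunch import Bunch  # attribute-style dict access, as also used in a later example


def process_config_sketch(json_file):
    # Read the JSON config and expose its keys as attributes
    # (e.g. config.exp_name, config.batch_size).
    with open(json_file, 'r') as f:
        config_dict = json.load(f)
    return Bunch(config_dict)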
Example #2
def evaluate():

    args = get_args()
    config = process_config(args.config)
    # load dataset file
    dataset = load_pair_paths(config)

    metric_names = []
    results = []
    model_names = []

    # (unpool_type, exp_name, prediction_model_name) for each trained variant
    experiments = [
        ("simple", "nyu-resnet-berhu-aug-30-simple-upproject", "model-150-0.19.km"),
        ("deconv", "nyu-resnet-berhu-aug-30-deconv-upproject", "model-150-0.21.km"),
        ("checkerboard", "nyu-resnet-berhu-aug-30-checkerboard-upproject", "model-150-0.20.km"),
        ("resize", "nyu-resnet-berhu-aug-30-resize-upproject", "model-150-0.20.km"),
    ]

    for unpool_type, exp_name, model_name in experiments:
        config.unpool_type = unpool_type
        config.exp_name = exp_name
        config.prediction_model_name = model_name
        config.model_dir = os.path.join("../experiments", config.exp_name, "model/")
        config.tensorboard_dir = os.path.join("../experiments", config.exp_name, "log/")

        metric_names, result = extract(dataset, config)
        results.append(result)
        model_names.append(config.unpool_type + "_" + config.model_type + "_" + config.loss_type)

    print(metric_names)
    print(results)
    print(model_names)
Example #3
    def __init__(self, configuration_file_path, gloves_name=None):
        """

        :param model_path: path to the static gestures model
        :param configuration_file_path: path to the configuration file to load
        :param glove_path: path to a glove if a pretrained glove is selected
        """

        self.configuration_file = process_config(
            configuration_file_path)  # load the configuration file
        #static_model path
        #model_path = self.configuration_file["static_model_path"]
        model_path = eval(self.configuration_file["static_model_path"])
        #print("static model path:",model_path)

        self.static_model = load_model(model_path)

        self.mouse_controller = controllers.MouseController(
            self.configuration_file)
        self.keyboard_controller = controllers.KeyboardController(
            self.configuration_file)
        self.dynamic_controller = controllers.DynamicController(
            self.configuration_file)
        self.intermediate_controller = controllers.IntermediateController(
            self.configuration_file)

        #add a new glove
        if gloves_name is None:
            file_name = time.strftime("%Y%m%d-%H%M%S")
            self.gloves = Gloves()
            save_path = os.path.join(os.getcwd(), 'Gloves', file_name + '.txt')
            gloves_image = self.gloves.train()

            #pickle.dump(self.gloves, save_path)
            with open(save_path, 'wb') as handle:
                pickle.dump(self.gloves,
                            handle,
                            protocol=pickle.HIGHEST_PROTOCOL)
            image_path = os.path.join(os.getcwd(), 'Gloves',
                                      file_name + '.jpg')
            cv2.imwrite(image_path, gloves_image)
        #load a saved glove
        else:
            #self.gloves = pickle.load(os.path.join(os.getcwd(), 'Gloves', gloves_name))
            glove_path = os.path.join(os.getcwd(), "Gloves",
                                      gloves_name + ".txt")
            with open(glove_path, 'rb') as handle:
                self.gloves = pickle.load(handle)

        self.current_mode = MODE.KEYBOARD
        self.start()
Example #4
def train():
    # load config file and prepare experiment
    args = get_args()
    config = process_config(args.config)
    create_dirs([config.model_dir, config.tensorboard_dir])

    # load dataset file
    dataset = load_pair_paths(config)

    # split dataset train and test
    train_pairs, test_pairs = split_dataset(config, dataset)

    if config.debug:
        print("WARNING!!! DEBUG MODE ON! 100 training.")
        train_pairs = train_pairs[:100]
        print(train_pairs)
        test_pairs = test_pairs[:100]
        print(test_pairs)

    # Calculate steps for each epoch
    train_num_steps = calculate_num_iter(config, train_pairs)
    test_num_steps = calculate_num_iter(config, test_pairs)


    # Create the model
    model = depth_model(config)

    #set dynamic output shape
    config.output_size = list(model.output_shape[1:])

    # Create train and test data generators
    train_gen = tf_data_generator(config, train_pairs, is_training=True)
    test_gen = tf_data_generator(config, test_pairs, is_training=False)

    # Prepare for training
    model.compile(optimizer=select_optimizer(config), loss=select_loss(config))

    model.fit(
        train_gen,
        steps_per_epoch=train_num_steps,
        epochs=config.num_epochs,
        callbacks=create_callbacks(config),
        validation_data=test_gen,
        validation_steps=test_num_steps,
        verbose=1)

    print("Training Done.")
Example #5
def predict():

    args = get_args()
    config = process_config(args.config)
    model = load_depth_model_from_weights(config)

    for i in range(8):

        img = image.load_img('../images/' + str(i + 1) + '.jpg',
                             target_size=(config.input_size[0],
                                          config.input_size[1]))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)

        prediction = model.predict(x)
        print("prediction shape", prediction.shape)
        prediction = np.reshape(prediction,
                                [prediction.shape[1], prediction.shape[2]])
        plt.imsave('../images/depth_' + str(i + 1) + '.jpg', prediction)
Example #6
                    curr_it,
                    len(test_loader),
                    l2h_loss_D.item(),
                    l2h_loss_D_real.item(),
                    l2h_loss_D_fake.item(),
                    l2h_loss_G.item(),
                    l2h_loss_G_GAN.item(),
                    l2h_loss_pixel.item(),
                ))

    def validate(self):
        pass

    def finalize(self):
        """
        Finalize the two main components of the process: the operator and the data loader.
        :return:
        """
        self.logger.info(
            "Please wait while finalizing the operation.. Thank you")
        self.save_checkpoint()
        self.summary_writer.export_scalars_to_json("{}all_scalars.json".format(
            self.config.summary_dir))
        self.summary_writer.close()
        self.dataloader.finalize()


if __name__ == "__main__":
    config_dir = config.process_config('configurations/train_config.json')
    gan = Combined_GAN(config_dir)
Example #7
import face_model
import argparse
import cv2
import sys
import numpy as np
import os
from tqdm import *
import imgaug as ia
from imgaug import augmenters as iaa
from bunch import Bunch
from config import process_config

config = process_config("./config.json")
args = Bunch(config.pretrained_model)
model = face_model.FaceModel(args)

sometimes = lambda aug: iaa.Sometimes(0.8, aug)
seq = iaa.Sequential([
    iaa.Fliplr(0.5),
    sometimes(
        iaa.OneOf([
            iaa.Grayscale(alpha=(0.0, 1.0)),
            iaa.AddToHueAndSaturation((-20, 20)),
            iaa.Add((-20, 20), per_channel=0.5),
            iaa.Multiply((0.5, 1.5), per_channel=0.5),
            iaa.GaussianBlur((0, 2.0)),
            iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5),
            iaa.Sharpen(alpha=(0, 0.5), lightness=(0.7, 1.3)),
            iaa.Emboss(alpha=(0, 0.5), strength=(0, 1.5))
        ]))
])
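As a hypothetical usage note (not part of the original file): the Sequential pipeline above can be applied to images before they are passed to the face model. "face.jpg" and the variable names below are illustrative only.

# Hypothetical usage of the augmentation pipeline defined above.
img = cv2.imread("face.jpg")                    # HxWx3 uint8 BGR image
img_aug = seq.augment_image(img)                # augment a single image
batch_aug = seq.augment_images([img, img_aug])  # or a list of images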
Example #8
        default='1e5',
        help=
        'How often to print the step to the terminal (may be in scientific notation)'
    )
    parser.add_argument('--delay',
                        type=int,
                        default=0,
                        help='Number of milliseconds between steps')
    parser.add_argument('--pause',
                        action='store_true',
                        default=False,
                        help='Start the game in a paused state')
    args = parser.parse_args()

    # Process config (if there is one) and update args
    process_config(args)

    # Set width and height
    set_width(args.width)
    set_height(args.height)

    # Get flea class
    args.flea_class = get_flea(args.flea_name)

    # Convert to float then int to allow for scientific notation
    args.display_frequency = int(float(args.display_frequency))
    args.print_frequency = int(float(args.print_frequency))

    run_simulation(args.num_rows, args.num_cols, args.flea_class,
                   args.num_fleas, args.flea_rows, args.flea_cols,
                   args.init_directions, args.square_colors, args.image,
Example #9
            noise = torch.randn(data_high.size(0), 1)
            noise, _ = self.to_var(noise)
            # We found a mistake at the very last moment in the high-to-low generator's pixel loss.
            # The pixel loss should compare the generated LR image against a downsampled copy of
            # the original HR image, but we passed the generated LR image and the dataset's actual
            # LR image instead. As a result we have no usable weights for the high-to-low model,
            # so purely for the sake of producing outputs we apply the workaround below, even
            # though it is not correct.
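            # Hedged sketch of the difference described above (downsample() and
            # criterion_pixel() are illustrative names, not code from this project):
            #   intended: loss_pixel = criterion_pixel(gen_lr, downsample(data_input_high))
            #   used:     loss_pixel = criterion_pixel(gen_lr, data_input_low)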
            #gen_int_lr = self.h2l_G(data_input_high, noise)
            gen_hr = self.l2h_G(data_input_low)
            path_int = os.path.join(self.test_file,
                                    img_name.split('.')[0] + '_int_lr.jpg')
            path_final = os.path.join(self.test_file,
                                      img_name.split('.')[0] + '_final_hr.jpg')
            vutils.save_image(data_input_low.data, path_int, normalize=True)
            #vutils.save_image(gen_hr.data, path_final, normalize=True)


if __name__ == '__main__':
    try:
        mode = sys.argv[1]
    except IndexError:
        print(
            'Missing value for mode.\nUsage: possible values for mode are h2l, l2h, combined.'
        )
        sys.exit(1)
    config_dir = config.process_config(
        'F:/Study/2nd_Semester/CV/Project/temp/configurations/test_config.json'
    )
    gan = TestCode(config_dir, mode)
    gan.test()
Example #10
            for tag, meter in state['model'].meters.items():
                file_id = 'pre_finetune_{}'.format(tag)
                logger.log(file_id, state['epoch'], meter.value()[0])

    engine.hooks['on_start'] = on_start
    engine.hooks['on_sample'] = on_sample
    engine.hooks['on_forward'] = on_forward
    engine.hooks['on_start_epoch'] = on_start_epoch
    engine.hooks['on_end_epoch'] = partial(on_end_epoch, {
        'best_val': best_val,
        'wait': 0
    })
    engine.train(model,
                 loaders['train'],
                 maxepoch=config['optim']['epochs'],
                 optimizer=optimizer)

    model.reset_meters()
    if os.path.exists(best_model_path):
        model.load_state_dict(torch.load(best_model_path))
    if loaders['test'] is not None:
        log_meters('test', logger, engine.test(model, loaders['test']))
    return model


if __name__ == '__main__':
    config = process_config()
    model_init = get_model(config)
    loaders = load_data(config)
    model = train(model_init, loaders, config)
Example #11
def __init__(self, config_path):
    self.initialled = False
    with open(config_path, 'r', encoding='utf-8') as config_file:
        self.config = process_config(config_path)
Example #12
File: taskland.py  Project: ftzm/TaskLand
import os

import actions
import config
import utils

__location__ = os.path.realpath(
    os.path.join(os.getcwd(), os.path.dirname(__file__)))

defaults = {
    'list_location': 'todo.txt',
    'default_command': 'h',
    'default_view': 'hide o p_id c_id a',
    'archive_location': 'archive.txt',
    'archive_automatically': 'false',
    'archive_delay': '2'
    }

settings = config.process_config(__location__, defaults)


def open_list():
    """open and read the task list file"""
    list_location = os.path.dirname(__file__) + "/" + settings['list_location']
    try:
        with open(list_location, 'r') as f:
            file = f.readlines()
    except FileNotFoundError:
        with open(list_location, 'w+') as f:
            file = f.readlines()
    return file


def print_projects():