Code example #1
def main(args_obj):

    # parse the config json file
    args, _ = get_config_from_json(args_obj.args)

    if not os.path.exists(os.path.join(args.exp_log_path, args.exp_name)):
        os.makedirs(os.path.join(args.exp_log_path, args.exp_name, 'logs'))
        os.makedirs(os.path.join(args.exp_log_path, args.exp_name, 'model'))
        os.makedirs(
            os.path.join(args.exp_log_path, args.exp_name, 'tensorboard'))
        os.makedirs(os.path.join(args.exp_log_path, args.exp_name, 'output'))

    args.log_dir = os.path.join(args.exp_log_path, args.exp_name, 'logs')
    args.model_dir = os.path.join(args.exp_log_path, args.exp_name, 'model')
    args.tensorboard_dir = os.path.join(args.exp_log_path, args.exp_name,
                                        'tensorboard')
    args.output_dir = os.path.join(args.exp_log_path, args.exp_name, 'output')

    setup_logging(args.log_dir, args.mode)

    # Create the agent, pass the full configuration to it, then run it
    agent_class = globals()[args.agent]
    agent = agent_class(args)
    agent.run()
    agent.finalize()
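
A note on the shared helper: every example on this page unpacks config, _ = get_config_from_json(...). The helper itself is not shown here; a minimal sketch, assuming it parses the JSON file into an attribute-accessible EasyDict and also returns the raw dict, might look like this (a reconstruction, not any project's actual code):

import json
from easydict import EasyDict


def get_config_from_json(json_file):
    # Hypothetical sketch: read the JSON config into a plain dict, then wrap
    # it in an EasyDict so fields can be read as attributes
    # (e.g. config.exp.name instead of config['exp']['name']).
    with open(json_file, 'r') as f:
        config_dict = json.load(f)
    config = EasyDict(config_dict)
    return config, config_dict
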
Code example #2
def run_multi():
    # Get the arguments
    args = get_args()
    config, _ = get_config_from_json(args.config)
    values_sn = config.exp.vals
    values_train = config.exp.vals
    values_init = config.exp.vals
    params = config.exp.params
    section = config.exp.section
    # Spectral Normalization
    for i in values_sn:
        # Mode
        for j in values_train:
            # Init
            for k in values_init:
                config[section][params[0]] = i
                config[section][params[1]] = j
                config[section][params[2]] = k
                config.exp.name = args.experiment + "_{}{}{}".format(int(i), int(j), int(k))
                process_config(config)
                create_dirs(
                    [
                        config.log.summary_dir,
                        config.log.checkpoint_dir,
                        config.log.step_generation_dir,
                        config.log.log_file_dir,
                        config.log.codebase_dir,
                    ]
                )
                # Copy the model code and the trainer code to the experiment folder
                run(config, args)
                tf.reset_default_graph()
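
Example #2 ends each sweep iteration with tf.reset_default_graph(). In TF1-style code this discards the default graph so the next run(config, args) can rebuild its variables from scratch; without it, the second iteration would collide with variables created by the first. A minimal illustration of that behavior (using the tensorflow.compat.v1 API, not the project's code):

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

for run_id in range(2):
    # Each iteration creates a variable named "w" in the current default graph.
    w = tf.get_variable("w", shape=[1])
    # ... build and train a model here ...
    # Without this reset, the second tf.get_variable("w", ...) call would
    # raise ValueError: Variable w already exists.
    tf.reset_default_graph()
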
Code example #3
File: main.py Project: lqiqiqi/SRP_SmartMeter
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    # try:
    args = get_args()
    config, _ = get_config_from_json(args.config)
    # except:
    #     print("missing or invalid arguments")
    #     exit(0)

    # GPU mode was requested, but no GPU is available
    if config.gpu_mode is True and not torch.cuda.is_available():
        raise Exception("No GPU found, please run with --gpu_mode=False")

    # create an instance of the model you want
    # model = Net(config)
    model = torch.nn.DataParallel(Net(config), device_ids=[0, 1])

    # set the logger
    # log_dir = os.path.join(config.save_dir, 'logs_'+config.exp_name)
    # if not os.path.exists(log_dir):
    #     os.makedirs(log_dir)
    # logger = Logger(log_dir)
    logger = None

    train_indices, test_indices = shuffle()
    # create your data generator
    data_train = DataGenerator(config, 'train').load_dataset()
    # create your data generator
    data_test = DataGenerator(config, 'test').load_dataset()

    # create trainer and pass all the previous components to it
    trainer = Trainer(model, config, data_train, logger, data_test)
    trainer.train_test()
Code example #4
def process_config_UtsClassification_bayes_optimization(
        json_file, learning_rate, model_name, batch_size, num_epochs=100):
    config, _ = get_config_from_json(json_file)

    config.model.name = model_name
    config.model.learning_rate = learning_rate
    config.trainer.num_epochs = num_epochs
    config.trainer.batch_size = batch_size

    config.callbacks.tensorboard_log_dir = os.path.join(
        "experiments", time.strftime("%Y-%m-%d/", time.localtime()),
        config.exp.name, config.dataset.name, config.model.name,
        "tensorboard_logs", "lr=%s,epoch=%s,batch=%s" %
        (config.model.learning_rate, config.trainer.num_epochs,
         config.trainer.batch_size))
    config.callbacks.checkpoint_dir = os.path.join(
        "experiments", time.strftime("%Y-%m-%d/", time.localtime()),
        config.exp.name, config.dataset.name, config.model.name,
        "%s-%s-%s" % (config.model.learning_rate, config.trainer.num_epochs,
                      config.trainer.batch_size), "checkpoints/")
    config.log_dir = os.path.join(
        "experiments", time.strftime("%Y-%m-%d/", time.localtime()),
        config.exp.name, config.dataset.name, config.model.name,
        "%s-%s-%s" % (config.model.learning_rate, config.trainer.num_epochs,
                      config.trainer.batch_size), "training_logs/")
    config.result_dir = os.path.join(
        "experiments", time.strftime("%Y-%m-%d/", time.localtime()),
        config.exp.name, config.dataset.name, config.model.name,
        "%s-%s-%s" % (config.model.learning_rate, config.trainer.num_epochs,
                      config.trainer.batch_size), "result/")
    return config
Code example #5
def process_config(json_file):
    config, _ = get_config_from_json(json_file)

    config.summary_dir = os.path.join(sys.path[0], "../experiments",
                                      config.exp_name, "summary/")
    config.img_dir = os.path.join(sys.path[0], "../experiments",
                                  config.exp_name, "images/")

    config_induce = EasyDict(config.config_induce)

    config_induce.exp_name = config.exp_name
    config_induce.summary_dir = config.summary_dir
    config_induce.checkpoint_dir = os.path.join(sys.path[0], "../experiments",
                                                config.exp_name, "checkpoint/")
    config_induce.pseudo_data_dir = os.path.join(sys.path[0], "../experiments",
                                                 config.exp_name,
                                                 "pseudo_data_dir/")

    config_cla = EasyDict(config.config_cla)

    config_cla.exp_name = config.exp_name
    config_cla.data_numpy_pkl = os.path.join(config_induce.pseudo_data_dir,
                                             "data_numpy.pkl")

    return config, config_induce, config_cla
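
Example #5 promotes the nested sub-dicts config_induce and config_cla into standalone configs by wrapping them in EasyDict. A small self-contained illustration of the pattern (toy field names, not the project's schema):

from easydict import EasyDict

cfg = EasyDict({'exp_name': 'demo', 'config_induce': {'lr': 0.01}})
# EasyDict converts nested dicts recursively, so cfg.config_induce already
# supports attribute access; re-wrapping it yields a separate sub-config
# object that can then carry its own copies of shared fields.
config_induce = EasyDict(cfg.config_induce)
config_induce.exp_name = cfg.exp_name
print(config_induce.lr, config_induce.exp_name)  # -> 0.01 demo
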
Code example #6
 def __init__(self, model_path):
     self.config = get_config_from_json('.//config.json')
     self.load_path = model_path
     self.val_data_dir = self.config.paths.val_data_dir
     self.test_data_dir = self.config.paths.test_data_dir
     self.model = tf.keras.models.load_model(
         self.load_path, custom_objects=get_custom_objects())
     print("Model loaded succesfully from " + self.load_path)
Code example #7
def process_config(json_file):
    config, _ = get_config_from_json(json_file)
    config.summary_dir = os.path.join(sys.path[0], "../experiments",
                                      config.exp_name, "summary/")
    config.checkpoint_dir = os.path.join(sys.path[0], "../experiments",
                                         config.exp_name, "checkpoint/")
    config.pseudo_data_dir = os.path.join(sys.path[0], "../experiments",
                                          config.exp_name, "pseudo_data_dir/")

    return config
Code example #8
File: main_sk.py Project: tactycHQ/Mercury2
def main():
    # Processing config file
    config = get_config_from_json('.\\utils\\config.json')

    # Processing data
    X_train, Y_train, X_val, Y_val, X_test, Y_test, num_train_features, num_train_samples, num_val_samples, num_test_samples = getData(mypath, config)

    svm = SVC(kernel='linear', verbose=1)
    print("SVC started")
    svm.fit(X_train, Y_train)
    print("fit complete")
    answer = svm.score(X_test, Y_test)
    print("answer:\n", answer)
    print("Model completed")
Code example #9
File: main.py Project: tactycHQ/Mercury3
def main():
    # Processing config file
    config = get_config_from_json('.\\utils\\config.json')

    # Processing data
    train_dataset, val_dataset, test_dataset, num_train_features, num_train_samples, num_val_samples, num_test_samples = getData(
        mypath, config)

    # Creating an empty model
    dense_model = DenseModel(num_train_features, config)
    load_flag = config.experiment.load

    # load model from h5 file
    if load_flag == True:
        try:
            print('Loading saved model')
            dense_model.load(".\\h5 models\\" + model_version)
            results = dense_model.model.evaluate(
                test_dataset,
                steps=int(num_test_samples / (config.model.batch_size)))
            print('test loss, test acc:', results)
        except Exception as ex:
            print(ex)
            print("Invalid model file name provided")

    # build and train and save a new model
    elif load_flag == False:
        try:
            dense_model.build_model()
            print('Create the trainer')
            trainer = Trainer(dense_model.model,
                              train_dataset,
                              val_dataset,
                              config,
                              steps_per_epoch=int(num_train_samples /
                                                  config.model.batch_size),
                              val_steps=int(num_val_samples /
                                            config.model.batch_size))
            print('Start training the model.')
            trainer.train()
            dense_model.save(".\\h5_models\\" + model_version)
        except Exception as ex:
            print(ex)
            print("Unable to create new model")
    else:
        print("Invalid load flag in config file")

    logging.info('---------Successful execution---------')
Code example #10
def run():
    # Get the arguments
    args = get_args()

    config, _ = get_config_from_json(args.config)
    config.exp.name = args.experiment
    config = process_config(config)
    # create the experiments dirs
    create_dirs([
        config.log.summary_dir,
        config.log.checkpoint_dir,
        config.log.step_generation_dir,
        config.log.log_file_dir,
        config.log.codebase_dir,
    ])

    # Copy the model code and the trainer code to the experiment folder
    copy_codebase(config)

    logger = Logger(config).get_logger(__name__)
    # Set the random seed
    tf.random.set_random_seed(config.data_loader.random_seed)
    # Create the tensorflow session
    sess = tf.Session()
    # Create the dataloader
    data = create("data_loader." + config.data_loader.name)(config)
    # Create the model instance
    model = create("models.{}.".format(config.data_loader.image_size) +
                   config.model.name)(config)
    # Create the summarizer Object
    summarizer = create("utils." + config.log.name)(sess, config)
    # Create the trainer
    trainer = create("trainers." + config.trainer.name)(sess, model, data,
                                                        config, summarizer)
    # Load model if exists
    model.load(sess)
    # Train the model
    if args.train:
        trainer.train()
    # Test the model
    if config.trainer.test_at_end:
        trainer.test()
    logger.info("Experiment has ended.")
Code example #11
def process_config_UtsClassification_bayes_optimization(json_file, params):
    config, _ = get_config_from_json(json_file)

    config.model.type = params['type']
    config.model.convfilt = params['convfilt']
    config.model.ksize = params['ksize']
    config.model.depth = params['depth']
    config.model.drop = params['drop']

    config.model.params = params

    config.callbacks.tensorboard_log_dir = os.path.join(
        "experiments", time.strftime("%Y-%m-%d/",
                                     time.localtime()), config.exp.name,
        config.dataset.name, config.model.name, "tensorboard_logs",
        "lr=%s,epoch=%s,batch=%s,type=%s,convfilt=%s,ksize=%s,depth=%s,drop=%s"
        % (config.model.learning_rate, config.trainer.num_epochs,
           config.trainer.batch_size, config.model.type, config.model.convfilt,
           config.model.ksize, config.model.depth, config.model.drop))
    config.callbacks.checkpoint_dir = os.path.join(
        "experiments", time.strftime("%Y-%m-%d/",
                                     time.localtime()), config.exp.name,
        config.dataset.name, config.model.name, "%s-%s-%s-%s-%s-%s-%s-%s" %
        (config.model.learning_rate, config.trainer.num_epochs,
         config.trainer.batch_size, config.model.type, config.model.convfilt,
         config.model.ksize, config.model.depth, config.model.drop),
        "checkpoints/")
    config.log_dir = os.path.join(
        "experiments", time.strftime("%Y-%m-%d/",
                                     time.localtime()), config.exp.name,
        config.dataset.name, config.model.name, "%s-%s-%s-%s-%s-%s-%s-%s" %
        (config.model.learning_rate, config.trainer.num_epochs,
         config.trainer.batch_size, config.model.type, config.model.convfilt,
         config.model.ksize, config.model.depth, config.model.drop),
        "training_logs/")
    config.result_dir = os.path.join(
        "experiments", time.strftime("%Y-%m-%d/",
                                     time.localtime()), config.exp.name,
        config.dataset.name, config.model.name, "%s-%s-%s-%s-%s-%s-%s-%s" %
        (config.model.learning_rate, config.trainer.num_epochs,
         config.trainer.batch_size, config.model.type, config.model.convfilt,
         config.model.ksize, config.model.depth, config.model.drop), "result/")
    return config
Code example #12
def myconfig():
    args = get_args()
    config, _ = get_config_from_json(args.config)
    exp_name = config.exp_name
    model_name = config.model_name
    data_dir = config.data_dir
    num_threads = config.num_threads
    num_channels = config.num_channels
    scale_factor = config.scale_factor
    num_epochs = config.num_epochs
    save_epochs = config.save_epochs
    batch_size = config.batch_size
    test_batch_size = config.test_batch_size
    save_dir = config.save_dir
    lr = config.lr
    gpu_mode = config.gpu_mode
    load_model = config.load_model

    d = 56  # out channels of first layer
    s = 32  # out channels of hidden layer
    m = 4  # number of layer of hidden layer block
Code example #13
File: main_test.py Project: lqiqiqi/SRP_SmartMeter
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    # try:
    args = get_args()
    config, _ = get_config_from_json(args.config)
    # except:
    #     print("missing or invalid arguments")
    #     exit(0)

    # GPU mode was requested, but no GPU is available
    if config.gpu_mode is True and not torch.cuda.is_available():
        raise Exception("No GPU found, please run with --gpu_mode=False")

    # create an instance of the model you want
    model = Net(config)

    # set the logger
    log_dir = os.path.join(config.save_dir, 'logs_' + config.exp_name)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    # logger = Logger(log_dir)
    logger = None

    # create your data generator
    # data_train = DataGenerator(config, 'debug').load_dataset()
    # create your data generator
    data_test = DataGenerator(config, 'test').load_dataset()
    data_train = None

    # create trainer and pass all the previous components to it
    # trainer = Trainer(model, config, data_train, logger, data_test)
    # trainer.train_test()

    # create tester and pass all the previous components to it
    # To use the last saved model: call the load_model function in trainer.py
    # To use an earlier model: point the base_model module at the specific checkpoint and call load_spec_model in trainer.py
    tester = Tester(model, config, data_train, logger, data_test)
    with torch.no_grad():
        tester.test()
Code example #14
def main():
    # capture the config path from the runtime arguments
    # then process the json configuration file
    args = get_args()
    print("Reading config from {}".format(args.config))
    config, _ = get_config_from_json(args.config)
    # add summary and model directory
    config = update_config_by_summary(config)
    # to remove the previous results, pass -d 1 as a parameter
    print('Whether to delete the previously saved model:', args.delete)
    if args.delete == '1':
        # delete existing checkpoints and summaries
        print('Deleting existing models and logs from:')
        print(config.summary_dir, config.checkpoint_dir)
        remove_dir(config.summary_dir)
        remove_dir(config.checkpoint_dir)

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    """Load data"""
    # load global word, position and tag vocabularies
    word_vocab = load_vocab(path=config.datadir + config.word_vocab_path,
                            mode='word')
    position_vocab = load_vocab(path=config.datadir + config.pos_vocab_path,
                                mode='pos')
    tag_vocab = load_vocab(path=config.datadir + config.tag_vocab_path,
                           mode='tag')
    config.word_vocab_size = len(word_vocab)
    config.pos_vocab_size = len(position_vocab)
    config.tag_vocab_size = len(tag_vocab)

    print('word vocab size:', config.word_vocab_size)

    # create your data generator to load train data
    x_path = config.datadir + config.train_path
    train_loader = DataLoader(config, x_path, word_vocab, position_vocab,
                              tag_vocab)
    train_loader.load_data()
    # update the max length for each patient and each visit to be used in lstm
    train_max_patient_len = train_loader.max_patient_len
    train_max_visit_len = train_loader.max_visit_len

    # create your data generator to load valid data
    x_path = config.datadir + config.valid_path
    valid_loader = DataLoader(config, x_path, word_vocab, position_vocab,
                              tag_vocab)
    valid_loader.load_data()
    valid_max_patient_len = valid_loader.max_patient_len
    valid_max_visit_len = valid_loader.max_visit_len

    # create your data generator to load test data
    x_path = config.datadir + config.test_path
    test_loader = DataLoader(config, x_path, word_vocab, position_vocab,
                             tag_vocab)
    test_loader.load_data()
    test_max_patient_len = test_loader.max_patient_len
    test_max_visit_len = test_loader.max_visit_len

    print("The max patient lengths for train, valid and test are {}, {}, {}".
          format(train_max_patient_len, valid_max_patient_len,
                 test_max_patient_len))
    print("The max visit lengths for train, valid and test are {}, {}, {}".
          format(train_max_visit_len, valid_max_visit_len, test_max_visit_len))

    # select the maximum lengths of visits and codes as the size of lstm
    config.max_patient_len = max(
        [train_max_patient_len, valid_max_patient_len, test_max_patient_len])
    config.max_visit_len = max(
        [train_max_visit_len, valid_max_visit_len, test_max_visit_len])

    train_loader.pad_data(config.max_patient_len, config.max_visit_len)
    valid_loader.pad_data(config.max_patient_len, config.max_visit_len)
    test_loader.pad_data(config.max_patient_len, config.max_visit_len)

    # add num_iter_per_epoch to config for trainer
    config.train_size = train_loader.get_datasize()
    config.valid_size = valid_loader.get_datasize()
    config.test_size = test_loader.get_datasize()
    config.num_iter_per_epoch = int(config.train_size / config.batch_size)
    print("The sizes for train, valid and test are {}, {}, {}".format(
        config.train_size, config.valid_size, config.test_size))
    """Run model"""
    # create tensorflow session
    # specify only using one GPU
    tfconfig = tf.ConfigProto(device_count={'GPU': 1})
    # allow the dynamic increase of GPU memory
    tfconfig.gpu_options.allow_growth = True
    # limit the maximum of GPU usage as 0.5
    #tfconfig.gpu_options.per_process_gpu_memory_fraction = 0.5
    with tf.Session(config=tfconfig) as sess:
        # create an instance of the model you want
        model = Model(config)
        # create tensorboard logger
        logger = Logger(sess, config)
        # create trainer and pass all the previous components to it
        trainer = PredTrainer(sess, model, train_loader, valid_loader,
                              test_loader, config, logger)
        # load model if exists
        model.load(sess)
        # here you train your model
        trainer.train()
Code example #15
def main():

    os.environ['CUDA_VISIBLE_DEVICES'] = '2'

    # capture the config path from the run arguments
    # then process the json configuration file
    # try:
    args = get_args()
    config, _ = get_config_from_json(args.config)

    bayes_config = {
        "algorithm": "bayes",
        "parameters": {
            # "model": {"type": "categorical", "values": ['cnn','mlp']},
            "learning_rate": {
                "type": "float",
                "min": 0.001,
                "max": 0.01
            },
            # "batch_size": {"type": "integer", "min": 16, "max": 32},
            # "num_epochs": {"type": "integer", "min": 5, "max": 10},
        },
        "spec": {
            "maxCombo": 10,
            "objective": "minimize",
            "metric": "test_f1",
            "minSampleSize": 100,
            "retryAssignLimit": 0,
        },
        "trials": 1,
        "name": "Bayes",
    }
    opt = Optimizer(bayes_config,
                    api_key=config.comet_api_key,
                    project_name=config.exp_name)
    for exp in opt.get_experiments():
        args = get_args()
        # config = process_config_UtsClassification_bayes_optimization(args.config, exp.get_parameter('model'),exp.get_parameter('learning_rate'),
        #                                                              exp.get_parameter('batch_size'), exp.get_parameter('num_epochs'))
        config = process_config_UtsClassification_bayes_optimization(
            args.config, exp.get_parameter('learning_rate'))
        # except:
        #     print("missing or invalid arguments")
        #     exit(0)

        # create the experiments dirs

        print('Create the data generator.')
        data_loader = UtsClassificationDataLoader(config)

        print('Create the model.')

        model = UtsClassificationModel(config, data_loader.get_inputshape(),
                                       data_loader.get_nbclasses())

        print('Create the trainer')
        trainer = UtsClassificationTrainer(model.model,
                                           data_loader.get_train_data(),
                                           config)

        print('Start training the model.')
        trainer.train()

        # print('Create the evaluater.')
        # evaluater = UtsClassificationEvaluater(trainer.best_model, data_loader.get_test_data(), data_loader.get_nbclasses(),
        #                                        config)
        #
        # print('Start evaluating the model.')
        # evaluater.evluate()

        exp.log_metric("test_f1", trainer.best_model_val_loss)

        print('done')
Code example #16
def main():
    detector_config = get_config_from_json('configs/orb_config.json')
    drone_config = get_config_from_json('configs/drone_config.json')
    glob_config = get_config_from_json('configs/glob_config.json')
    landscape_map = Image.read(glob_config.path_to_map)
    detector = HashPointDetector(detector_config)
    drone = Drone(drone_config, glob_config, detector, landscape_map)
    drone.load_hashes(detector.config.path_to_hashes)
    visualizer = Visualizer(landscape_map)
    visualizer.draw_trajectory(xsmooth, ysmooth)
    position = drone.get_position()

    shift = random.random() * 100.0

    count_step = 0
    for node in nodes:
        destination = node['point']
        detect_position = drone.get_position_from_image()
        while drone.get_distance_to_point(destination) > 32:
            prev_position = drone.get_position()
            visualizer.draw_line_moving(prev_position, position)
            position = drone.get_position()
            if count_step % 4 == 0:
                print('Take picture')
                position = drone.get_position()
                start_time = time.time()
                detect_position = drone.get_position_from_image()
                mse_value = mse(position, detect_position)
                detect_real_errors.append(mse_value)
                visualizer.dashboard_text(4, 'MSE: {}'.format(mse_value))
                time_detecting = time.time() - start_time
                visualizer.dashboard_text(5, 'Time: {}'.format(time_detecting))
                times_detecting.append(time_detecting)
            visualizer.init_debug_frame()
            visualizer.draw_destination(destination)
            picture_params = drone.get_picture_params()
            visualizer.update_landscape_debug_frame()
            drone.move(destination)
            visualizer.draw_drone_moving(picture_params, position,
                                         drone.real_pos, detect_position)
            visualizer.draw_drone_picture(drone.get_picture())
            visualizer.dashboard_text(
                0, 'Speed: {}'.format(float(drone.get_speed())))
            visualizer.dashboard_text(
                1, 'Height: {}'.format(
                    200.0 + 15 * math.sin(shift + time.time() * 0.017) +
                    7 * math.cos(shift * 13 + time.time() * 0.15)))
            visualizer.dashboard_text(
                2, 'Angle: {}'.format(math.degrees(abs(drone.get_rotation()))))
            visualizer.dashboard_text(
                3, 'Keypoints: {}'.format(drone.cur_candidates))
            visualizer.dashboard_text(
                6, 'Average MSE: {}'.format(median(detect_real_errors)))
            visualizer.dashboard_text(
                7, 'Average time: {}'.format(median(times_detecting)))
            count_step += 1
            cv2.waitKey(50)
    print('Detect-real MSE: ',
          sum(detect_real_errors) / len(detect_real_errors))
    print('Mean time detecting: ', sum(times_detecting) / len(times_detecting))
    visualizer.wait()
Code example #17
import tensorflow as tf

from utils.config import get_args
from utils.config import get_config_from_json
from utils.utils import judge_and_new
import os

if __name__ == '__main__':
    # tf.app.run()
    ''' Dynamic configs '''
    try:
        args = get_args()
        config = get_config_from_json(args.config)
    except Exception:
        raise Exception("missing or invalid arguments")
    ''' Static configs '''
    os.environ["CUDA_VISIBLE_DEVICES"] = "0, 2"
    judge_and_new(os.path.join(config.work_root, config.project_name))
    config.ckpt_path = os.path.join(config.work_root, config.project_name,
                                    'save')
    judge_and_new(os.path.join(config.work_root, config.project_name, 'save'))
    # config.timeline_path = os.path.join(config.work_root, config.project_name, 'timelines')
    # judge_clean_new(os.path.join(config.work_root, config.project_name, 'timelines'))
    config.tensorb_path = os.path.join(config.work_root, config.project_name,
                                       'tensorb')
    judge_and_new(
        os.path.join(config.work_root, config.project_name, 'tensorb'))

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.85,
                                allow_growth=True)
    # gpu_options = tf.GPUOptions(allow_growth=True)
Code example #18
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    args = get_args()

    config = process_config(args.config_folder + "train_config.json")

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])

    # create your data generator
    cfg = get_config_from_json(args.config_folder+"data_config.json")[0]
    data = DataGenerator(cfg)

    IMAGENET_FILEPATH = os.path.join(cfg.BASE_PATH, cfg.IMAGENET_FILEPATH)
    MODEL_FILEPATH = os.path.join(cfg.BASE_PATH, cfg.MODEL_FILEPATH)

    # create novelty GAN
    novelty_gan = NoveltyGAN(generator_output_classes=cfg.OUTPUT_CLASSES, fcn=True, upsampling=False, alpha=0.25,
                             imagenet_filepath=IMAGENET_FILEPATH, model_filepath=MODEL_FILEPATH)

    # create the trainer object
    trainer = NoveltyGANTrainer(novelty_gan, data, config)

    # loss = trainer.train_step_gan()
    # loss = trainer.train_step(train_on_real_data=True)
    # print("loss = ", loss)

    for epoch_id in range(config.num_epochs):
        loss = trainer.train_epoch(epoch_id)

    # novelty_gan.gan.summary()

    # Sanity check: works.
    """
    for batch in data.next_batch(3):
        labels_batch = novelty_gan.generator.predict_on_batch(batch[0])
        print("Image from batch:", batch[0].shape)
        print("Ground truth labels:", batch[1].shape)
        print("Predicted labels:", labels_batch.shape)
        # isReal = novelty_gan.discriminator.predict_on_batch([batch[1], batch[0]])
        # isReal = novelty_gan.discriminator.predict_on_batch([labels_batch, batch[0]])
        isReal = novelty_gan.gan.predict_on_batch(batch[0])
        print("discriminator prediction for ground-truth:", isReal.shape)
    """

    # Yet another sanity check for training.
    """
    for batch in data.next_batch(3):
        img_batch = batch[0]
        labels_batch = batch[1]
        # predicted_labels_batch = novelty_gan.generator.predict_on_batch(img_batch)
        target = np.zeros((3, 2))
        target[:, 0] = 1
        loss = novelty_gan.discriminator.train_on_batch([labels_batch, img_batch], target)
        print(loss)
    """

    if 0:
        print('_________________')
        print(cfg)
        print('_________________')
Code example #19
File: cla_main.py Project: sungsoo-ahn/pseudo-data
def process_config(json_file):
    config, _ = get_config_from_json(json_file)

    return config
Code example #20
from metrics.metrics import BinaryMetrics, MulticlassMetrics
from sklearn.preprocessing import LabelBinarizer
from utils.config import get_config_from_json
from pathlib import Path
import numpy as np
import os

config, _ = get_config_from_json('configs/config_metrics.json')
results, _ = get_config_from_json(config.results.file)

n_classes = len(config.dataset.classes)

prob_predictions = np.array(results.prob_predictions)
ground_truth = np.array(results.ground_truth)
categorical_ground_truth = ground_truth

n_samples = ground_truth.size

if len(ground_truth.shape) == 1:
    # prob_predictions_1d = prob_predictions
    # prob_predictions = np.zeros((n_samples, ground_truth.max()+1), dtype=float)
    # prob_predictions[np.arange(n_samples), 0] = 1 - prob_predictions_1d[np.arange(n_samples)]
    # prob_predictions[np.arange(n_samples), 1] = prob_predictions_1d[np.arange(n_samples)]
    predictions = np.round(prob_predictions + 0.01).astype(int)
    categorical_ground_truth = np.zeros((n_samples, ground_truth.max()+1), dtype=int)
    categorical_ground_truth[np.arange(n_samples), ground_truth] = 1
elif len(ground_truth.shape) == 2:
    predictions = np.argmax(prob_predictions, axis=-1)
    ground_truth = np.argmax(categorical_ground_truth, axis=-1)

if n_classes == 2:
Code example #21
 def test_get_config_from_json(self):
     from utils.config import get_config_from_json
     json_file = '/home/cl3720/2019-concept2vec/deep-learning-skeleton/./experiments/configs.json'
     config, _ = get_config_from_json(json_file)
     self.assertTrue('data' in config)
Code example #22
    index = 0
    with Path(path).open('r') as f:
        for l in f:
            key = l.strip() if mode == 'word' else int(l.strip())
            vocab[key] = index
            index += 1
    return vocab


if __name__ == '__main__':
    """
    load data and build vocab
    """
    # load data from disk
    config_path = '../configs/text_ner.json'
    config = get_config_from_json(config_path)
    datadir = '../data_samples/ner/'
    words_path = datadir + 'corpus_words.txt'
    tags_path = datadir + 'corpus_tags.txt'
    X, Y = [], []
    with Path(words_path).open('rb') as f:
        for l in f:
            X.append(l.strip().split())
    word_vocab = build_vocab(X)
    with Path(tags_path).open('rb') as f:
        for l in f:
            Y.append(l.strip().split())
    tag_vocab = build_vocab(Y)
    # add padding token
    if PAD_WORD not in word_vocab: word_vocab.add(PAD_WORD)
    if PAD_TAG not in tag_vocab: tag_vocab.add(PAD_TAG)
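
The __main__ block above relies on a build_vocab helper that is not included in the snippet. Since padding tokens are later added with word_vocab.add(...), the helper evidently returns a set; a minimal sketch consistent with that (an assumption, not the project's code):

import itertools


def build_vocab(sequences):
    # Hypothetical sketch: flatten a list of token lists and collect the
    # unique tokens into a set, which supports the .add(...) calls above.
    return set(itertools.chain.from_iterable(sequences))
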
Code example #23
    for tile in tiles:
        dx, dy, _, _ = tile['coordinates']
        kp, des = detector.create_features(tile['image_cls'].img)
        for key_point in kp:
            key_point.pt = (key_point.pt[0] + dx, key_point.pt[1] + dy)
        image_bin = binImages(kp, des)
        hashes = get_hashes(image_bin)
        result.append({
            'tile_coordinates': tile['coordinates'],
            'hashes': hashes
        })
    return result


if __name__ == '__main__':
    detector_config = get_config_from_json('configs/orb_config.json')
    glob_config = get_config_from_json('configs/glob_config.json')
    landscape_map = Image.read(glob_config.path_to_map)
    detector = HashPointDetector(detector_config)
    tiles = landscape_map.get_tiles(tile_size=glob_config.tile_size)
    landscape_hashes = create_hashes(tiles, detector)
    with open(detector.config.path_to_hashes, 'wb') as des_file:
        pickle.dump(landscape_hashes, des_file)
    print('Done!')

# def create_hashes(tiles, detector):
#     result = []
#     for tile in tiles:
#         dx, dy, _, _ = tile['coordinates']
#         sub_tiles = tile['image_cls'].get_tiles(tile_size=(
#             glob_config.tile_size[0] // 2,
Code example #24
File: main.py Project: AndreyRysistov/BeeVSWasp
from dataloaders.dataloader import DataLoader
from models.conv_model import ConvModel
from trainers.conv_model_trainer import ConvModelTrainer
from utils.config import get_config_from_json
from utils.args import get_args


if __name__ == "__main__":
    try:
        args = get_args()  # parse args
        config, _ = get_config_from_json(args.config)  # load config
    except FileNotFoundError:
        print("File {} doesn't exist".format(args.config))
        exit(0)
    except Exception:
        print("Missing or invalid arguments")
        exit(0)
    dataloader = DataLoader('datasets/data', config) #create data_loader
    train_data, valid_data = dataloader.create_datasets()
    model = ConvModel(config)
    trainer = ConvModelTrainer(config, model)
    trainer.train(train_data, valid_data)
Code example #25
File: client.py Project: tactycHQ/Pulse1
 def __init__(self, model_path):
     self.config = get_config_from_json('.//config.json')
     self.load_path = model_path
     self.vocab_path = self.config.paths.vocab_path
     self.val_data_dir = self.config.paths.val_data_dir
     self.test_data_dir = self.config.paths.test_data_dir
Code example #26
 def __init__(self):
     self.config = get_config_from_json('.//config.json')
     self.callbacks = []
     self.init_callbacks()
     self.train()
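
The init_callbacks method that example #26 calls is not shown on this page. In skeletons like this it usually fills self.callbacks with Keras callbacks before training; a hedged sketch (the config field names here are invented for illustration, not read from the config.json above):

import tensorflow as tf


def init_callbacks(self):
    # Hypothetical sketch: periodic checkpointing plus TensorBoard logging.
    # self.config.checkpoint_dir and self.config.tensorboard_dir are assumed
    # field names.
    self.callbacks.append(tf.keras.callbacks.ModelCheckpoint(
        filepath=self.config.checkpoint_dir + "model-{epoch:02d}.hdf5",
        save_best_only=True))
    self.callbacks.append(tf.keras.callbacks.TensorBoard(
        log_dir=self.config.tensorboard_dir))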