Example No. 1
def main(config):
    prepare_dirs_and_logger(config)
    save_config(config)

    if config.is_train:
        from trainer import Trainer
        if config.dataset == 'line':
            from data_line import BatchManager
        elif config.dataset == 'ch':
            from data_ch import BatchManager
        elif config.dataset == 'kanji':
            from data_kanji import BatchManager
        elif config.dataset == 'baseball' or\
             config.dataset == 'cat':
            from data_qdraw import BatchManager

        batch_manager = BatchManager(config)
        trainer = Trainer(config, batch_manager)
        trainer.train()
    else:
        from tester import Tester
        if config.dataset == 'line':
            from data_line import BatchManager
        elif config.dataset == 'ch':
            from data_ch import BatchManager
        elif config.dataset == 'kanji':
            from data_kanji import BatchManager
        elif config.dataset == 'baseball' or\
             config.dataset == 'cat':
            from data_qdraw import BatchManager
        
        batch_manager = BatchManager(config)
        tester = Tester(config, batch_manager)
        tester.test()
Example No. 2
def main(*args):
    config, unparsed = get_config()

    prepare_dirs_and_logger(config)

    rng = np.random.RandomState(config.random_seed)
    tf.set_random_seed(config.random_seed)

    trainer = Trainer(config)
    trained_model = "/home/exx/Documents/Hope/BEGAN-tensorflow-regressor-20170811-GED-eclipse-ptx-traffic-z3/models/GAN/GAN_2017_11_15_16_52_17/experiment_41293.ckpt"
    aa = np.load(
        '/home/exx/Documents/Hope/BEGAN-tensorflow-regressor-20170811-GED-eclipse-ptx-traffic/attack_data_new/eps150_[1101]->[1110]_FGSM_and_feat_squeeze_data.npz'
    )
    valid_x = np.asarray(aa['FGSM_features'], 'float32')
    valid_y = aa['orig_target'][:, 1:]
    # aa = np.load(
    #     '/home/exx/Documents/Hope/BEGAN-tensorflow-regressor-20170811-GED-eclipse-ptx-traffic/traffic_sign_dataset2.npz')
    # valid_x = aa['data']
    # valid_y = (aa['label'][1:]).tolist()*len(aa['data'])
    testing_paralist = [[[0.25], [0.6], [0.7], [0.001], [7.0], [-5.0]]]

    for para_list in testing_paralist:
        para_list[2][0] = para_list[2][0] - para_list[1][0]
        log_err = trainer.test(np.expand_dims(valid_x, 1), valid_y,
                               trained_model, para_list, 'eps-150', 1)
    print('done')
Example No. 3
def main(config):
    prepare_dirs_and_logger(config)

    rng = np.random.RandomState(config.random_seed)
    tf.set_random_seed(config.random_seed)

    if config.is_train:
        data_path = config.data_path
        batch_size = config.batch_size
        do_shuffle = True
    else:
        setattr(config, 'batch_size', 1)
        if config.test_data_path is None:
            data_path = config.data_path
        else:
            data_path = config.test_data_path
        batch_size = config.sample_per_image
        do_shuffle = False

    data_loader = get_loader(data_path, config.batch_size,
                             config.input_scale_size, config.data_format,
                             config.split)
    mask_loader = get_mask_loader(config.mask_dir, config.batch_size,
                                  config.mask_scale_size)
    trainer = Trainer(config, data_loader, mask_loader)

    if config.is_train:
        save_config(config)
        trainer.train()
    else:
        if not config.load_path:
            raise Exception(
                "[!] You should specify `load_path` to load a pretrained model"
            )
        trainer.test_context_encoder()
Example No. 4
def main(config):
    prepare_dirs_and_logger(config)

    rng = np.random.RandomState(config.random_seed)
    tf.set_random_seed(config.random_seed)

    if config.is_train:
        data_path = config.data_path
        batch_size = config.batch_size
        do_shuffle = True
    else:
        setattr(config, 'batch_size', 64)
        if config.test_data_path is None:
            data_path = config.data_path
        else:
            data_path = config.test_data_path
        batch_size = config.sample_per_image
        do_shuffle = False

    trainer = Trainer(config)

    if config.is_train:
        save_config(config)
        trainer.train()
    else:
        if not config.load_path:
            raise Exception("[!] You should specify `load_path` to load a pretrained model")
        trainer.test()
Example No. 5
def main(config):
    prepare_dirs_and_logger(config)

    tf.set_random_seed(config.random_seed)

    train_data_loader, train_label_loader_list = get_loader(
        config.dataset, config.data_path, config.batch_size, 0, 'train', True)
    '''
  if config.is_train:
    test_data_loader, test_label_loader_list = get_loader(
      config.dataset, config.data_path, config.batch_size_test, config.testset_start_index, 'test', False)
  else:
    test_data_loader, test_label_loader_list = get_loader(
      config.dataset, config.data_path, config.batch_size_test, 0, config.split, False)
   '''

    test_data_loader = None
    test_label_loader_list = []
    trainer = Trainer(config, train_data_loader, train_label_loader_list,
                      test_data_loader, test_label_loader_list)
    if config.is_train:
        save_config(config)
        results = trainer.train()
    else:
        if not config.load_path:
            raise Exception(
                "[!] You should specify `load_path` to load a pretrained model"
            )
        #trainer.test()
        results = None  # NOTE: keeps save_result() below from raising a NameError when not training

    save_result(results, config)

    #results = load_result('2018_11_17_13_53_10')
    plot_result(results, config)
Example No. 6
def main(config):
    prepare_dirs_and_logger(config)
    loader = get_loader(config.data_dir, config.dataset, config.batch_size)
    loader_3d = get_3d_loader(config.batch_size)
    trainer = Trainer(config, loader)
    save_config(config)
    trainer.train()
Example No. 7
def main(config):
    prepare_dirs_and_logger(config)

    torch.manual_seed(config.random_seed)
    if config.num_gpu > 0:
        torch.cuda.manual_seed(config.random_seed)

    if config.is_train:
        data_path = config.data_path
        batch_size = config.batch_size
    else:
        if config.test_data_path is None:
            data_path = config.data_path
        else:
            data_path = config.test_data_path
        batch_size = config.sample_per_image

    a_data_loader, b_data_loader = get_loader(
            data_path, batch_size, config.input_scale_size,
            config.num_worker, config.skip_pix2pix_processing)

    trainer = Trainer(config, a_data_loader, b_data_loader)

    if config.is_train:
        save_config(config)
        trainer.train()
    else:
        if not config.load_path:
            raise Exception("[!] You should specify `load_path` to load a pretrained model")
        trainer.test()
Example No. 8
def main(config):
    with open("data/DF_test_data/p_pairs_test.p", 'rb') as f:
        pn_pairs = pickle.load(f)

    prepare_dirs_and_logger(config)

    if config.gpu > -1:
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
        os.environ["CUDA_VISIBLE_DEVICES"] = str(config.gpu)

    config.data_format = 'NHWC'

    if 1 == config.model:
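        # model 1 selects the Market-1501 pipeline (PG2) and model 11 the DeepFashion
        # pipeline (PG2_256), as indicated by the log messages in Example No. 34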
        trainer = PG2(config)
        trainer.init_net()
    elif 11 == config.model:
        trainer = PG2_256(config)
        trainer.init_net()

    if config.is_train:
        save_config(config)
        trainer.train()
    else:
        # if not config.load_path:
        #     raise Exception("[!] You should specify `load_path` to load a pretrained model")
        trainer.test()
Example No. 9
def main(*args):
    config, unparsed = get_config()

    prepare_dirs_and_logger(config)

    rng = np.random.RandomState(config.random_seed)
    tf.set_random_seed(config.random_seed)

    trainer = Trainer(config)
    trained_model = "/home/exx/Documents/Hope/BEGAN-tensorflow-regressor-20170811-GED-eclipse-ptx-traffic/models/GAN/GAN_2017_11_01_15_04_47/experiment_185390.ckpt"
    testing_dataset_path = ''
    testing_dataset = '/home/exx/Documents/Hope/BEGAN-tensorflow-regressor-20170811-GED-eclipse-ptx-traffic/traffic_sign_dataset2.npz'

    aa = np.load(
        './attack_data/TrafficSign_FGSM_and_CPPN_Datasets/eps150_FGSM_and_feat_squeeze_data.npz'
    )
    valid_x = np.asarray(aa['FGSM_features'], 'float32')
    valid_y = aa['orig_target']
    para_list = [[0.25], [0.5, 0.65, 0.8], [0.1, 0.2, 0.3],
                 [0.001, 0.01, 0.02], [3., 5., 7.], [-3., -4., -5., -6., -7.]]
    para_list[1] = [para_list[1][args[1]]]
    para_list[2] = [para_list[2][args[2]]]
    para_list[3] = [para_list[3][args[0]]]
    log_err = trainer.valid(np.expand_dims(valid_x, 1), valid_y, trained_model,
                            para_list, args[0] + 1)
Example No. 10
def main(config):
    prepare_dirs_and_logger(config)

    rng = np.random.RandomState(config.random_seed)
    tf.set_random_seed(config.random_seed)

    if config.is_train:
        data_path = config.data_path
        batch_size = config.batch_size
        do_shuffle = True
    else:
        setattr(config, 'batch_size', 64)
        if config.test_data_path is None:
            data_path = config.data_path
        else:
            data_path = config.test_data_path
        batch_size = config.sample_per_image
        do_shuffle = False

    config.pct = [20, 80]
    print('\n\nUsing thinning factor according to {}:{} ratio.\n\n'.format(
        config.pct[0], config.pct[1]))
    trainer = Trainer(config)

    if config.is_train:
        save_config(config)
        trainer.train()
    else:
        if not config.load_path:
            raise Exception(
                "[!] You should specify `load_path` to load a pretrained model"
            )
        trainer.test()
Example No. 11
def main(_):
    """
    The main process of train and test
    :param _:
    :return:
    """
    prepare_dirs_and_logger(config)
    if not config.task.lower().startswith('binpacking'):
        raise Exception("[!] Task should starts with binpacking")

    if config.max_enc_length is None:
        config.max_enc_length = config.max_data_length
    if config.max_dec_length is None:
        config.max_dec_length = config.max_data_length

    tf.set_random_seed(config.random_seed)

    # A jar is used to calculate the objective function value, so start the JVM first.
    path = os.getcwd()
    jar_path = path + "/idad-solver-binpacking/idad-solver-binpacking_least_area.jar"
    start_jvm(jar_path)

    trainer = Trainer(config)
    save_config(config.model_dir, config)

    if config.is_train:
        trainer.train()
    else:
        if not config.load_path:
            raise Exception("[!] You should specify `load_path` to load a pretrained model")
        trainer.test()

    tf.logging.info("Run finished.")

    shutdown_jvm()
Example No. 12
def main(config):
    prepare_dirs_and_logger(config)

    rng = np.random.RandomState(config.random_seed)
    tf.set_random_seed(config.random_seed)

    if config.is_train:
        data_path = config.data_path
        batch_size = config.batch_size
        do_shuffle = True
    else:
        setattr(config, 'batch_size', 64)
        if config.test_data_path is None:
            data_path = config.data_path
        else:
            data_path = config.test_data_path
        batch_size = config.sample_per_image
        do_shuffle = False

    dir_source = 'blahblahblah'
    data_path = config.data_dir  #+ '/' + config.dataset
    # (root, batch_size, source_mix, classes, split_name, data_format = 'NHWC', seed = None)
    images_train = get_loader(data_path, config.batch_size, config.source_mix,
                              config.data_classes)
    trainer = Trainer(config, images_train)

    if config.is_train:
        save_config(config)
        trainer.train()
    else:
        if not config.load_path:
            raise Exception(
                "[!] You should specify `load_path` to load a pretrained model"
            )
        trainer.test()
Example No. 13
def main(config):
    prepare_dirs_and_logger(config)
    save_config(config)

    if config.is_train:
        from trainer import Trainer
        if config.dataset == 'line':
            from data_line import BatchManager
        elif config.dataset == 'ch':
            from data_ch import BatchManager
        elif config.dataset == 'kanji':
            from data_kanji import BatchManager
        elif config.dataset == 'baseball' or\
             config.dataset == 'cat':
            from data_qdraw import BatchManager

        batch_manager = BatchManager(config)
        trainer = Trainer(config, batch_manager)
        trainer.train()
    else:
        from tester import Tester
        if config.dataset == 'line':
            from data_line import BatchManager
        elif config.dataset == 'ch':
            from data_ch import BatchManager
        elif config.dataset == 'kanji':
            from data_kanji import BatchManager
        elif config.dataset == 'baseball' or\
             config.dataset == 'cat':
            from data_qdraw import BatchManager

        batch_manager = BatchManager(config)
        tester = Tester(config, batch_manager)
        tester.test()
Example No. 14
def main(_):
  prepare_dirs_and_logger(config)

  if not config.task.lower().startswith('tsp'):
    raise Exception("[!] Task should starts with TSP")

  if config.max_enc_length is None:
    config.max_enc_length = config.max_data_length
  if config.max_dec_length is None:
    config.max_dec_length = config.max_data_length

  rng = np.random.RandomState(config.random_seed)
  tf.set_random_seed(config.random_seed)

  trainer = Trainer(config, rng)
  save_config(config.model_dir, config)

  if config.is_train:
    trainer.train()
  else:
    if not config.load_path:
      raise Exception("[!] You should specify `load_path` to load a pretrained model")
    trainer.test()

  tf.logging.info("Run finished.")
Example No. 15
def main(config, model):
    prepare_dirs_and_logger(config)

    rng = np.random.RandomState(config.random_seed)
    tf.set_random_seed(config.random_seed)

    train_data_loader, train_label_loader = get_loader(config.data_path,
                                                       config.batch_size,
                                                       config, 'train', True)

    if config.is_train:
        test_data_loader, test_label_loader = get_loader(
            config.data_path, config.batch_size_test, config, 'test', False)
    else:
        test_data_loader, test_label_loader = get_loader(
            config.data_path, config.batch_size_test, config, config.split,
            False)

    trainer = Trainer(config, train_data_loader, train_label_loader,
                      test_data_loader, test_label_loader, model)
    if config.is_train:
        save_config(config)
        trainer.train()
    else:
        if not config.load_path:
            raise Exception(
                "[!] You should specify `load_path` to load a pretrained model"
            )
        trainer.test()
Example No. 16
def main(config):
    prepare_dirs_and_logger(config)

    if config.gpu > -1:
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
        os.environ["CUDA_VISIBLE_DEVICES"] = str(config.gpu)

    config.data_format = 'NHWC'

    if 1 == config.model:
        trainer = PG2(config)
        trainer.init_net()
    elif 11 == config.model:
        trainer = PG2_256(config)
        trainer.init_net()
        
    if config.is_train:
        save_config(config)
        trainer.train()
    else:
        # if not config.load_path:
        #     raise Exception("[!] You should specify `load_path` to load a pretrained model")
        input_path = './hunter_test/df002.png'
        pose_path = './hunter_test/ultraman1.npy'
        x = cv2.imread(input_path)
        p = np.load(pose_path)
        #pp = p[0:18,:,:]
        trainer.generate_hunter(x, p)
Example No. 17
def main(config):
    prepare_dirs_and_logger(config)

    rng = np.random.RandomState(config.random_seed)
    tf.set_random_seed(config.random_seed)

    load()
    train_data_loader, train_label_loader, train_loc_loader, train_mask_loader = get_loader(
        config.data_path, config.batch_size, 0, 'train', True)
    test_data_loader, test_label_loader, test_loc_loader, test_mask_loader = get_loader(
        config.data_path, config.batch_size_test, 5, 'train', True)

    trainer = Trainer(config, train_data_loader, train_label_loader,
                      train_loc_loader, train_mask_loader, test_data_loader,
                      test_label_loader, test_loc_loader, test_mask_loader)
    print("loaded trainer")
    if config.is_train:
        save_config(config)
        trainer.train()
        print("finished train")
    else:
        if not config.load_path:
            raise Exception(
                "[!] You should specify `load_path` to load a pretrained model"
            )
        trainer.test()
Example No. 18
def main(_):
  prepare_dirs_and_logger(config)

  if not config.task.lower().startswith('tsp'):
    raise Exception("[!] Task should starts with TSP")

  if config.max_enc_length is None:
    config.max_enc_length = config.max_data_length
  if config.max_dec_length is None:
    config.max_dec_length = config.max_data_length

  rng = np.random.RandomState(config.random_seed)
  tf.set_random_seed(config.random_seed)

  trainer = Trainer(config, rng)
  save_config(config.model_dir, config)

  if config.is_train:
    trainer.train()
  else:
    if not config.load_path:
      raise Exception("[!] You should specify `load_path` to load a pre-trained model")
    trainer.test()

  tf.logging.info("Run finished.")
Example No. 19
def main(config):
    # NOTE: Run this in shell first.
    print(
        'NOTE: FIRST RUN:\n"source ~/began/BEGAN-tensorflow/tf1.1/bin/activate"'
    )

    prepare_dirs_and_logger(config)

    rng = np.random.RandomState(config.random_seed)
    tf.set_random_seed(config.random_seed)

    if config.is_train:
        data_path = config.data_path
        batch_size = config.batch_size
        do_shuffle = True
    else:
        setattr(config, 'batch_size', 64)
        if config.test_data_path is None:
            data_path = config.data_path
        else:
            data_path = config.test_data_path
        batch_size = config.sample_per_image
        do_shuffle = False

    dir_loader = 'train8020'
    #dir_loader = 'train6040'
    #dir_loader = 'train4060'
    #dir_loader = 'train2080'
    #dir_loader = 'train1090'
    #dir_loader = 'train0510'
    #dir_loader = 'trainBig0510'
    #dir_loader = 'train_all_1090'
    #dir_loader = 'train_small_5050'
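    # The trailing four digits of the directory name encode the percentage mix,
    # e.g. 'train8020' -> pct == [80, 20]; the parsing below relies on that convention.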
    config.pct = [int(dir_loader[-4:][:2]), int(dir_loader[-4:][2:])]

    dir_target = 'train5050'
    data_loader = get_loader(data_path,
                             config.batch_size,
                             config.scale_size,
                             config.data_format,
                             config.split,
                             target=dir_loader)
    data_loader_target = get_loader(data_path,
                                    config.batch_size,
                                    config.scale_size,
                                    config.data_format,
                                    config.split,
                                    target=dir_target)
    trainer = Trainer(config, data_loader, data_loader_target)

    if config.is_train:
        save_config(config)
        trainer.train()
    else:
        if not config.load_path:
            raise Exception(
                "[!] You should specify `load_path` to load a pretrained model"
            )
        trainer.test()
Example No. 20
def main(config):
    prepare_dirs_and_logger(config)

    torch.manual_seed(config.random_seed)
    if len(config.gpu_ids) > 0:
        torch.cuda.manual_seed(config.random_seed)

    if config.is_train:
        data_path = config.data_path
        batch_size = config.batch_size
        do_shuffle = True
    else:
        if config.test_data_path is None:
            data_path = config.data_path
        else:
            data_path = config.test_data_path
        batch_size = config.sample_per_image
        do_shuffle = False

    if config.src_names is not None:
        config.src_names = config.src_names.split(",")

    if config.load_attributes is not None:
        config.load_attributes = config.load_attributes.split(",")

    if config.filter_by_pop is not None:
        fn_filter = lambda fnames: fn_filter_contains(
            fnames, any_of=config.filter_by_pop)
    else:
        fn_filter = None

    normalize = config.normalize if not config.normalize_channels else normalize_channels

    data_loader = get_loader(data_path,
                             config.split,
                             batch_size,
                             config.input_scale_size,
                             num_workers=config.num_worker,
                             shuffle=do_shuffle,
                             load_attributes=config.load_attributes,
                             flips=config.flips,
                             rotate_angle=config.rotate_angle,
                             take_log=config.take_log,
                             normalize=normalize,
                             use_channels=config.use_channels,
                             fn_filter=fn_filter)

    trainer = Trainer(config, data_loader)

    if config.is_train:
        save_config(config)
        trainer.train()
    else:
        if not config.load_path:
            raise Exception(
                "[!] You should specify `load_path` to load a pretrained model"
            )
        trainer.test()
Example No. 21
def main(config):
    # NOTE: Run this in shell first.
    #if tf.__version__[:3] != '1.1':
    #    sys.exit('***NOTE!***: FIRST RUN:\n"source ~/began/BEGAN-tensorflow/tf1.1/bin/activate"')
    # NOTE: Other setup requirements.
    print('\nREQUIREMENTS:\n  1. The file "user_weights.npy" should '
          'contain the user-provided labels for images in /user_images.\n')
    #print('Press "c" to continue.\n\n')
    #pdb.set_trace()

    prepare_dirs_and_logger(config)

    # Alert if config.log_dir already contains files.
    if len(os.listdir(config.log_dir)) > 0:
        print('log_dir contains files, continue? (c)')
        pdb.set_trace()

    rng = np.random.RandomState(config.random_seed)
    tf.set_random_seed(config.random_seed)

    if config.is_train:
        data_path = config.data_path
        batch_size = config.batch_size
        do_shuffle = True
    else:
        setattr(config, 'batch_size', 64)
        if config.test_data_path is None:
            data_path = config.data_path
        else:
            data_path = config.test_data_path
        batch_size = config.sample_per_image
        do_shuffle = False

    data_loader = get_loader(data_path,
                             config.batch_size,
                             config.scale_size,
                             config.data_format,
                             split_name='train',
                             grayscale=config.grayscale)
    images_user, images_user_weights = load_user(config.dataset,
                                                 data_path,
                                                 config.scale_size,
                                                 config.data_format,
                                                 grayscale=config.grayscale)
    trainer = Trainer(config, data_loader, images_user, images_user_weights)

    if config.is_train:
        save_config(config)
        trainer.train()
    else:
        if not config.load_path:
            raise Exception(
                "[!] You should specify `load_path` to load a pretrained model"
            )
        trainer.test()
Example No. 22
def main(config):
    # NOTE: Run this in shell first.
    if np.float(tf.__version__[:3]) < 1.7:
        sys.exit('***NOTE!***: FIRST RUN:\n"source ~/virtualenvironment/tf1.7/bin/activate"')

    prepare_dirs_and_logger(config)

    # Alert if config.log_dir already contains files.
    if not config.load_existing:
        if len(os.listdir(config.log_dir)) > 1:
            print(('log dir ({}) contains files besides checkpoints dir, '
                   'continue? (c)').format(config.log_dir))
            pdb.set_trace()


    rng = np.random.RandomState(config.random_seed)
    tf.set_random_seed(config.random_seed)

    if config.is_train:
        data_path = config.data_path
        batch_size = config.batch_size
        do_shuffle = True
    else:
        setattr(config, 'batch_size', 64)
        if config.test_data_path is None:
            data_path = config.data_path
        else:
            data_path = config.test_data_path
        batch_size = config.sample_per_image
        do_shuffle = False

    if config.dataset == 'mnist':
        #directory_to_load = '7s_train'
        #directory_to_load = '8s_train'
        directory_to_load = '1to9_train'
    elif config.dataset == 'birds':
        directory_to_load = 'images_preprocessed'
    elif config.dataset == 'celeba':
        directory_to_load = 'train'

    inputs = get_loader(
        data_path, config.batch_size, config.scale_size,
        config.data_format, split_name=directory_to_load,
        )
    trainer = Trainer(config, inputs)

    if config.is_train:
        save_config(config)
        trainer.train()
    else:
        if not config.load_path:
            raise Exception("[!] You should specify `load_path` to load a pretrained model")
        trainer.test()
Example No. 23
def main(_):
    prepare_dirs_and_logger(config)

    if not config.task.lower().startswith('tsp'):
        raise Exception("[!] Task should starts with TSP")

    if config.max_enc_length is None:
        config.max_enc_length = config.max_data_length
    if config.max_dec_length is None:
        config.max_dec_length = config.max_data_length

    rng = np.random.RandomState(config.random_seed)
    tf.set_random_seed(config.random_seed)

    model = Model(config)

    batch_size = config.batch_size

    train_enc_seq, train_target_seq, train_enc_seq_length, train_target_seq_length = gen_data('data/tsp10.txt')

    eval_enc_seq = train_enc_seq[-batch_size:]
    eval_target_seq = train_target_seq[-batch_size:]
    eval_enc_seq_length = train_enc_seq_length[-batch_size:]
    eval_target_seq_length = train_target_seq_length[-batch_size:]

    train_enc_seq = train_enc_seq[:-batch_size]
    train_target_seq = train_target_seq[:-batch_size]
    train_enc_seq_length = train_enc_seq_length[:-batch_size]
    train_target_seq_length = train_target_seq_length[:-batch_size]

    test_enc_seq, test_target_seq, test_enc_seq_length, test_target_seq_length = gen_data('data/tsp10_test.txt')

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(min(config.max_step,len(train_enc_seq)//batch_size)):
            train_batch={
                'enc_seq': train_enc_seq[step * batch_size:(step + 1) * batch_size],
                'enc_seq_length': train_enc_seq_length[step * batch_size:(step + 1) * batch_size],
                'target_seq': train_target_seq[step * batch_size:(step + 1) * batch_size],
                'target_seq_length': train_target_seq_length[step * batch_size:(step + 1) * batch_size]
            }
            loss = model.train(sess,train_batch)
            print(str(step) + " train loss : " + str(loss))

            if step > 0 and step % config.eval_step == 0:
                eval_batch = {
                    'enc_seq': eval_enc_seq,
                    'enc_seq_length': eval_enc_seq_length,
                    'target_seq': eval_target_seq,
                    'target_seq_length': eval_target_seq_length
                }
                eval_loss = model.eval(sess,eval_batch)
                print(str(step) + " eval loss : " + str(eval_loss))
Example No. 24
def get_trainer():
    print('tf: resetting default graph!')
    tf.reset_default_graph()#for repeated calls in ipython


    ####GET CONFIGURATION####
    #TODO:load configurations from previous model when loading previous model
    ##if load_path:
        #load config files from dir
    #except if pt_load_path, get cc_config from before
    #overwrite is_train, is_pretrain with current args--sort of a mess

    ##else:
    config, _ = get_config()
    cc_config, _ = get_cc_config()
    dcgan_config, _ = get_dcgan_config()
    began_config, _ = get_began_config()

    ###SEEDS###
    np.random.seed(config.seed)
    #tf.set_random_seed(config.seed) # Not working right now.

    prepare_dirs_and_logger(config)
    if not config.load_path:
        print('saving config because load path not given')
        save_configs(config, cc_config, dcgan_config, began_config)

    # Resolve model differences and batch_size
    if config.model_type:
        if config.model_type == 'dcgan':
            config.batch_size = dcgan_config.batch_size
            cc_config.batch_size = dcgan_config.batch_size  # make sure the batch size of cc is the same as the image model
            config.Model = CausalGAN.CausalGAN
            model_config = dcgan_config
        if config.model_type == 'began':
            config.batch_size = began_config.batch_size
            cc_config.batch_size = began_config.batch_size  # make sure the batch size of cc is the same as the image model
            config.Model = CausalBEGAN.CausalBEGAN
            model_config = began_config

    else:  # no image model
        model_config = None
        config.batch_size = cc_config.batch_size

        if began_config.is_train or dcgan_config.is_train:
            raise ValueError('need to specify model_type for is_train=True')

    # Interpret causal_model keyword
    cc_config.graph = get_causal_graph(config.causal_model)

    # Builds and loads specified models:
    trainer = Trainer(config, cc_config, model_config)
    return trainer
Example No. 25
def main(config):
    prepare_dirs_and_logger(config)

    rng = np.random.RandomState(config.random_seed)
    tf.set_random_seed(config.random_seed)

    if config.is_train:
        data_path = config.data_path
        batch_size = config.batch_size
        do_shuffle = True
    else:
        setattr(config, 'batch_size', 64)
        if config.test_data_path is None:
            data_path = config.data_path
            #print('\n data_path ',data_path,'\n')
        else:
            data_path = config.test_data_path
        batch_size = config.sample_per_image
        do_shuffle = False

    data_path_real = './img_real/'
    data_path_gen = './img_gen/'

    data_loader = get_loader(data_path, config.batch_size,
                             config.input_scale_size, config.data_format,
                             config.split)
    data_loader_real = get_loader(  # change paths !
        data_path_real, config.batch_size, config.input_scale_size,
        config.data_format, config.split)
    data_loader_gen = get_loader(  # change paths !
        data_path_gen, config.batch_size, config.input_scale_size,
        config.data_format, config.split)
    trainer = Trainer(config, data_loader, data_loader_real, data_loader_gen)

    if config.is_train:
        save_config(config)
        trainer.train()
        return None
    else:
        if not config.load_path:
            raise Exception(
                "[!] You should specify `load_path` to load a pretrained model"
            )
        trainer.test()
        img_real = imread(config.img_real)
        img_gen = imread(config.img_gen)
        d_loss = trainer.d_loss_out(img_real, img_gen)
        print("d_loss : ", d_loss)
        return d_loss
Example No. 26
def main(config):
    prepare_dirs_and_logger(config)

    rng = np.random.RandomState(config.random_seed)
    tf.set_random_seed(config.random_seed)
    loader = get_loader(config.data_dir, config.batch_size)
    trainer = Trainer(config, loader)
    if config.is_train:
        save_config(config)
        trainer.train()
    else:
        trainer = Trainer(config, loader)
        if not config.load_path:
            raise Exception(
                "[!] You should specify `load_path` to load a pretrained model"
            )
        trainer.test()
Example No. 27
def main(config):
    prepare_dirs_and_logger(config)
    #pdb.set_trace()
    torch.manual_seed(config.random_seed)
    if config.num_gpu > 0:
        torch.cuda.manual_seed(config.random_seed)

    if config.is_train:
        data_path = config.data_path
        batch_size = config.batch_size
    else:
        if config.test_data_path is None:
            data_path = config.data_path
        else:
            data_path = config.test_data_path
        batch_size = config.sample_per_image

    data_path = './NUS-WIDE/'
    data_loader = get_loader(data_path,
                             batch_size,
                             config.input_scale_size,
                             config.num_worker,
                             ifshuffle=True,
                             TEST=False,
                             FEA=True)

    test_data_loader = get_loader(data_path,
                                  batch_size,
                                  config.input_scale_size,
                                  config.num_worker,
                                  ifshuffle=False,
                                  TEST=True,
                                  FEA=True)

    torch.cuda.set_device(5)
    trainer = Trainer(config, data_loader, test_data_loader)

    if config.is_train:
        save_config(config)
        trainer.train()
    else:
        if not config.load_path:
            raise Exception(
                "[!] You should specify `load_path` to load a pretrained model"
            )
        trainer.test()
Example No. 28
def main(config):
    prepare_dirs_and_logger(config)
    batch_manager = BatchManager(config)
    preprocess_path('data/qdraw/baseball/train/4503641325043712.svg', 128, 128,
                    batch_manager.rng)
    preprocess_overlap('data/qdraw/baseball/train/4503641325043712.svg', 128,
                       128, batch_manager.rng)

    # thread test
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess_config.allow_soft_placement = True
    sess_config.log_device_placement = False
    sess = tf.Session(config=sess_config)
    batch_manager.start_thread(sess)

    x, y = batch_manager.batch()
    if config.data_format == 'NCHW':
        x = nhwc_to_nchw(x)
    x_, y_ = sess.run([x, y])
    batch_manager.stop_thread()

    if config.data_format == 'NCHW':
        x_ = x_.transpose([0, 2, 3, 1])

    if config.archi == 'path':
        b_ch = np.zeros([config.batch_size, config.height, config.width, 1])
        x_ = np.concatenate((x_ * 255, b_ch), axis=-1)
    else:
        x_ = x_ * 255
    y_ = y_ * 255

    save_image(x_, '{}/x_fixed.png'.format(config.model_dir))
    save_image(y_, '{}/y_fixed.png'.format(config.model_dir))

    # random pick from parameter space
    x_samples, x_gt, y_gt, sample_list = batch_manager.random_list(8)
    save_image(x_gt, '{}/x_gt.png'.format(config.model_dir))
    save_image(y_gt, '{}/y_gt.png'.format(config.model_dir))

    with open('{}/sample_list.txt'.format(config.model_dir), 'w') as f:
        for sample in sample_list:
            f.write(sample + '\n')

    print('batch manager test done')
Example No. 29
def written2all(written):
    print(written)
    STANDARD_L = [0, '', '', '', 'B204', 'BD90', '', '', '', '']
    TARGETS = [0, '', '', '', 'D1A0', 'D3F0', '', '', '', '']
    output = {}
    for code, path in written.items():
        config, _ = get_config()
        config.data_path = path
        config.dataset = hex(int(code)).split('x')[1].upper()
        config.sample_per_image = 1
        uniclass = UnicodeToKoreanClass(code)

        # skip if there is no pth file
        if STANDARD_L[uniclass] == '':
            continue

        print('./pths/%s' % (STANDARD_L[uniclass]))
        config.load_path = './pths/%s' % (STANDARD_L[uniclass])

        target = TARGETS[uniclass]
        name_pth = '%s_%s' % (STANDARD_L[uniclass], target)
        prepare_dirs_and_logger(config)
        torch.manual_seed(config.random_seed)
        if config.num_gpu > 0:
            torch.cuda.manual_seed(config.random_seed)
        data_path = config.data_path
        batch_size = config.sample_per_image

        a_data_loader = get_loader_a(data_path, batch_size,
                                     config.input_scale_size,
                                     config.num_worker,
                                     config.skip_pix2pix_processing)
        tester = Tester(config, a_data_loader, name_pth)
        img_AB = tester.test()

        dir = './output'
        if not os.path.exists(dir):
            os.makedirs(dir)

        vutils.save_image(img_AB, './output/{}.png'.format(code))
        with open('./output/{}.png'.format(code), "rb") as image_file:
            b64Image = base64.b64encode(image_file.read()).decode('utf-8')
            output[code] = b64Image

    return output
Example No. 30
def main(config):
    prepare_dirs_and_logger(config)
    batch_manager = BatchManager(config)
    preprocess_path('data/kanji/train/0f9a8.svg_pre', 64, 64, batch_manager.rng)
    preprocess_overlap('data/kanji/train/0f9a8.svg_pre', 64, 64, batch_manager.rng)

    # thread test
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess_config.allow_soft_placement = True
    sess_config.log_device_placement = False
    sess = tf.Session(config=sess_config)
    batch_manager.start_thread(sess)

    x, y = batch_manager.batch()
    if config.data_format == 'NCHW':
        x = nhwc_to_nchw(x)
    x_, y_ = sess.run([x, y])
    batch_manager.stop_thread()

    if config.data_format == 'NCHW':
        x_ = x_.transpose([0, 2, 3, 1])

    if config.archi == 'path':
        b_ch = np.zeros([config.batch_size,config.height,config.width,1])
        x_ = np.concatenate((x_*255, b_ch), axis=-1)
    else:
        x_ = x_*255
    y_ = y_*255

    save_image(x_, '{}/x_fixed.png'.format(config.model_dir))
    save_image(y_, '{}/y_fixed.png'.format(config.model_dir))


    # random pick from parameter space
    x_samples, x_gt, y_gt, sample_list = batch_manager.random_list(8)
    save_image(x_gt, '{}/x_gt.png'.format(config.model_dir))
    save_image(y_gt, '{}/y_gt.png'.format(config.model_dir))

    with open('{}/sample_list.txt'.format(config.model_dir), 'w') as f:
        for sample in sample_list:
            f.write(sample+'\n')

    print('batch manager test done')
Example No. 31
def main(config):
    prepare_dirs_and_logger(config)

    rng = np.random.RandomState(config.random_seed)
    tf.set_random_seed(config.random_seed)

    if config.is_train:
        data_path = config.data_path
        batch_size = config.batch_size
        do_shuffle = True
    else:
        setattr(config, 'batch_size', 64)
        if config.test_data_path is None:
            data_path = config.data_path
        else:
            data_path = config.test_data_path
        batch_size = config.sample_per_image
        do_shuffle = False

    config.pct = [20, 80]
    print('\n\nUsing thinning factor according to {}:{} ratio.\n\n'.format(
        config.pct[0], config.pct[1]))

    data_loader_user = get_loader(
        data_path, config.batch_size, config.scale_size,
        config.data_format, split_name=config.split, target='user', n=500)
    data_loader_train = get_loader(
        data_path, config.batch_size, config.scale_size,
        config.data_format, split_name=config.split, target='train', mix='2080')
    data_loader_test = get_loader(
        data_path, config.batch_size, config.scale_size,
        config.data_format, split_name=config.split, target='test')

    trainer = Trainer(config, data_loader_user, data_loader_train, data_loader_test)

    if config.is_train:
        save_config(config)
        trainer.train()
    else:
        if not config.load_path:
            raise Exception("[!] You should specify `load_path` to load a pretrained model")
        trainer.test()
Example No. 32
def main(config):
    prepare_dirs_and_logger(config)

    if config.gpu > -1:
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
        os.environ["CUDA_VISIBLE_DEVICES"] = str(config.gpu)

    config.data_format = 'NHWC'

    if 1 == config.model:  ################### original model_idx=223  Works Best #####################
        trainer = PG2(config)
        trainer.init_net()
    elif 11 == config.model:  ################### original model_idx=10208 #####################
        trainer = PG2_256(config)
        trainer.init_net()
    if config.is_train:
        save_config(config)
        trainer.train()
    else:
        # if not config.load_path:
        #     raise Exception("[!] You should specify `load_path` to load a pretrained model")
        trainer.test()
Example No. 33
def main(config):
    prepare_dirs_and_logger(config)

    rng = np.random.RandomState(config.random_seed)
    tf.set_random_seed(config.random_seed)

    if config.is_train:
        data_path = config.data_path
        batch_size = config.batch_size
        do_shuffle=True
    else:
        setattr(config,'batch_size',64)
        if config.test_data_path is None:
            data_path = config.data_path
        else:
            data_path = config.test_data_path
        batch_size = config.sample_per_image
        do_shuffle=False
    if config.dataset == 'mnist' :
        mnist = tf.keras.datasets.mnist
        (x_train,y_train),(x_test,y_test) = mnist.load_data()
        data_loader = batch_generator(x_train, config.batch_size, config.data_format)
    elif config.dataset == 'cifar10':
        cifar = tf.keras.datasets.cifar10
        (x_train,y_train),(x_test,y_test) = cifar.load_data()
        data_loader = batch_generator(x_train, config.batch_size, config.data_format)
    else:
        data_loader = get_loader(
            data_path, config.batch_size,config.scale_size, config.data_format)
    trainer = Trainer(config,data_loader)

    if config.is_train:
        save_config(config)
        trainer.train()
    else:
        if not config.load_path:
            raise Exception("[!] You should specify load_path to load a pretrained model")
        trainer.test()
Example No. 34
def main(config):
    """
    config parameters: Namespace(D_arch='DCGAN', batch_size=1, beta1=0.5, beta2=0.999, ckpt_path=None, conv_hidden_num=128,
    d_lr=2e-05, data_dir='data', data_format='NCHW', dataset='DF_train_data', g_lr=2e-05, gamma=0.5, gpu=0, 
    grayscale=False, img_H=256, img_W=256, is_train=True, lambda_k=0.001, load_path='', log_dir='logs', 
    log_level='INFO', log_step=200, lr_update_step=50000, max_step=80, model=11, model_dir='path_to_directory_of_model',
     num_log_samples=3, num_worker=4, optimizer='adam', pretrained_path=None, random_seed=123, sample_per_image=64, 
     save_model_secs=1000, split='train', start_step=0, test_data_path=None, test_one_by_one=False, use_gpu=True, 
     z_num=2)

    """
    prepare_dirs_and_logger(config)

    if config.gpu > -1:
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"   # see issue #152
        os.environ["CUDA_VISIBLE_DEVICES"] = str(config.gpu)

    config.data_format = 'NHWC'
    trainer = None
    if 1 == config.model:
        print("使用PG2(),即Market-1501 数据库,并初始化")
        trainer = PG2(config)
        trainer.init_net()
    elif 11 == config.model:
        print("使用PG2_256(),即DeepFashion数据库,并初始化")
        trainer = PG2_256(config)
        trainer.init_net()
        
    if config.is_train:
        print("开始训练")
        save_config(config)  # 存储参数到json文件
        trainer.train()  # 开始训练
    else:
        print("开始测试")
        if not config.load_path:
            raise Exception("[!] 没有指定 `load_path` 用于读取预训练的模型")
        trainer.test()
Example No. 35
def main(config):
    prepare_dirs_and_logger(config)

    rng = np.random.RandomState(config.random_seed)
    tf.set_random_seed(config.random_seed)

    if config.is_train:
        data_path = config.data_path
        batch_size = config.batch_size
        do_shuffle = True
    else:
        #setattr(config, 'batch_size', 1)
        if config.test_data_path is None:
            data_path = config.data_path
        else:
            data_path = config.test_data_path
        batch_size = config.sample_per_image
        do_shuffle = False
    print(data_path)
    data_loader = get_loader(
            data_path, config.batch_size, config.input_scale_size,
            config.data_format, config.split)
    trainer = Trainer(config, data_loader)

    if config.is_train:
        save_config(config)
        trainer.train()
    else:
        if not config.load_path:
            raise Exception("[!] You should specify `load_path` to load a pretrained model")
        imagestring0="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAMCAgICAgMCAgIDAwMDBAYEBAQEBAgGBgUGCQgKCgkICQkKDA8MCgsOCwkJDRENDg8QEBEQCgwSExIQEw8QEBD/2wBDAQMDAwQDBAgEBAgQCwkLEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBD/wAARCAEAANsDASIAAhEBAxEB/8QAHgABAAIBBQEBAAAAAAAAAAAAAAgJBwECBAUGAwr/xABFEAABAwMCBAMECAIIAwkAAAABAAIDBAURBhIHCCExQVFhCROBkRQVIjJxobHBI1IXM0JicoKy0RYkohg0RXODkqPC8P/EABoBAQACAwEAAAAAAAAAAAAAAAAFBgIDBAH/xAAqEQEAAgEDBAAFBQEBAAAAAAAAAQIDBAUREiExQRMiJVFSBhRxgbFiwf/aAAwDAQACEQMRAD8AtTREQEREBERAREQEREBEW0uABJQNzCMhwwPFdXUai09R1jLdVXqhgqpfuQSVDGyP/BpOSo587nNG7gBoyG16Zqo/+LL+x7KIP6/RoR0dOR5jqG+vXrhVJXLW+q7/AHmTUVfqa7Vt4mlD21G975XSE9XdyfzA9AtV8sU8t+LTTl7w/QTFLHJh0bg4eYK+oOVTtyv853EXhLquns+s77U3LTz6gU9wo6qVxlja4ge9j3ElpGc4HQq3exXi23q1Ul2tVUypo6+JtTTzRu3NkjeAWuBHTrnK9pki/hhkxThniXZotA4O7LVbGsREQEREBERAREQEREBERAREQEREBERAREQF835DT0z0W8HK8Vxj1mNA8MNU6wLgw2i1VFS13k5rDj/qIXkzERy9iOZ4U/8APRxNdxI5kNW1UFZ7yhs9VHYqENduaI4Bskc3/FJ7wn4LK3KJoawSaU+sa20QVEs0hcZXQhxAB6DJHQKDd1ulXeL3PcaiQmaomkq5HP6lziepPqVLjg1wyvVj09Sa4h1nPT0phZNFT0o92x2eu2THVxyoHcbcx54WnZ8czMxFeWVuZzgtoK7cObzqSjsNLQXOlp31VPWQARkytGdpPY5Axg/BZc9mVxXrtY8LqzQVzrHTVGnRFLTbiSWQTMyWjPgHhxA8NxA6YWE+OnDa66ls1y1patW1kApLXKTQmNskTjsOejgcZHiOq2eyVNdNrzVE0fvDTMslO12ScA+8GB/qWO2Xn1L3e8Ucd68LRwcDste62gdMrcOysKqQ1REQEREBERAREQEREBERAREQEREBERAREQaDuVH3nsubrfyva7LHEOqKFtMMD+eVjSfgCVIBxAB64Uc+fCpFPy1axBZE5slKyEGUZa18krGB/X+UFx/HC15Plo2Yoi2SI9qQqOJ0s9RUPGGgOiZnqPLP7qXnDq+3u78NLZYdOQ0t0lheGy00sr2naPuvY5niOvQ+aiTeamnpqKW3UTXDbGH++Jx0B7/HB6qS3ArQGudN1lI2emrYH1EgZE2Ju8Sk9thHfpjsoPW0vesX47rbteb9tkmszxDJXGziJduH/Bq4yapZR227XOB9FQUkUz5ZpctAL5C7sADnA6jB81lb2TnDrVFm09qPiFcqZ0NpvLYqGh3jDpnQvcJHejWn7PqVzOK3IBcON2iaKsvWqpLRqGjBfR++ZvY1rgMslHQjOB0b1GD5rkcANGc5fL5pscPLZpWw6hs9LVF8M/0yIYBOXOwXhzc4z2+Gcro0OH4Neq0d3FuuptrrzWk+E+GO3MB75C+i8RpHU2oZWR0utqW2WqumYHR01PW++PbqT0GPmvZxzMeOhUrF4tPyyrs0tXtL6ItPHutVmxEREBERAREQEREBERAREQEREBERAREQeY15rO16DsNRqC7bjFGQxjGjJkeezQP3UB+bLj1d+LOlTwzo3UFG2rr3VMj+pEVNDCTmQnxBc4g+YapXc2Ehi0JSY7G4NyPP7Dv9lCOs0RbLzpnWep7q73dPSU0dPLKOhZDh75cHzdiIfD1URqdRf91GGPCd0GmxzpZ1Fo+blBu62GCnkdBUVYeXUxlMmOj9rXYa0+I6dVcnwi4c0FgvGnrw1n/Lz2mOeA4Dg2V8Aa5np36KnOG3RS3mmsF0nlp6Sd4dSTSty9kLjggg9R3d0KvS4cVNjvekLLcdO18dVQR08bKeZn9oNbj4LbgnrnizDVxMcTEvWzOa1xbKSRnO0dQPD9lxrpu+r6gRDY6RhEYA6+f6LliIF2/G4k9cr4V9LJUywHJaIsnHnkY/3XXbxw5I6Z8o86ur6sanqnvqpPsBjAd56NA/3WduDGoptS6Jp6mrl95UUk0lLI4nqdpy0n12kKO3ESSeg1/e7f3bHIyRh8g6MHCylyw1cj7df6FzyWMqoph6FzMH/Sq5tua8a+2O09pTu64Ynb65K+v/AFnUd1uWgHYrVWpUo8CIiPRERAREQEREBERAREQEREBERAREQYc5pbeazhfU1Qbn6FV08p/Av2n/AFKLXBj6o1VcdXcLb7StmpbnQtr2Y6OO0tjkHwDmEKX/ADC7Bwg1MZD0FOwj8RIzChJy/wBY+LmHt0O3/vdqrIXHyAaHf/VvyUHqo6dfSY9rFoLTbbskT6dTxB5VdKaf4ZXcPpfputLrWMtWnmOO6aomkkbsew98jDy89sNz0UyeWvhpceGXC+3aYvEr31ED5SA49fdl52Z9cD816K3aS07drnR6iuNujmuNsbJFSyuH9Q14w7aPAkZ69+q9nC/qfHJyu6uKKWmYR2fUzekVbGRBvUeBK+c7j78gkHaAFyGnLsYHchdTUSuNTK0/zY7Lpjvw56947o1cXpYo+KF5i3gvdDTuIHrGFkXlYO6l1I/Paogbj/IT+6xLxYfjjDe8nJNPTYz4fwwsq8qp/gambnvUU7v+l4/ZVjSR9Un+1t18fR4/pIRvgty2N8FvVriOFJERF6CIiAiIgIiICIiAiIgIiICIiAiIgxbzKOczg5qHBxubA34GZmVETltswr+OlTcgCWWiyzPz5PkkZG0fJzvkpT8094jt3C2so3lpdcKmGFjT/dcJCfkz81grlQtzW1+rdSEAtldSUsb8eDQ97x83RqHzRF9fWPtCxaH5NsyT+U8JL6crmOqZ6R3Q/e/HC9HAftDCxpDdKiguL6mnDdxyMOGRgrtoNW3aQPAEAIje4YZjqGnzK7/iRCNvg7vbh+0uceu3JXl/rf3tW7IxucexXXN1Pd5Rl07MlpBGwDouPTSZnYMDqfArXbLzPEM64umszKO/FmqLuNd2jI6Ppqfv3/qwsx8qsLxT6mlc3ANRTsGfRhP7lYX4zMxxtaIupko4S8Dp0GVn7lqgjisF8mZ3kuYb8BDH/ufmoPSR9VlY9wt07PEfwzcAFqtjfBb1aVIgRER6IiICIiAiIgIiICIiAiIgIiICIiCLfONdnzVVo0815axtPNOf8Rw0H8l0nLNSmk4dzTk/ara+olz/AHWuZGB+S4/OFWe51z
Dvd0jtrCPQF5z+i7vhPa5dM6Ot1ic53vI6SGaYu6HfI9zyPn+ihsFOvXZLz/CxZctcO24qx7nl7KpcG1TmjuvtTPOX9/6p/b/CV0VddTDczC9oGR3XOorjCWTyCUZbC89T8P3XXfFLjjLEufA87emfLqudRSf8zGD2DsldFFcInu2tlb812tqnY6tia17SSR0z3yQP3WMUmJiYZWmJjhgLiNWQ3LjNeJWkn6uhhp8gehcf2UgeWOmldoivuMpO2tukr4+ncNaxn6tI+Ci/cqh0+sdYXYvz726zsDvRjtox6dCpm8GLM6w8NLFRvZte+nE8gI/tSOLv3UTtlZy66+X7JbeskYtBjxR7e6b4LetrQVuVmVEREQEREBERAREQEREBERAREQEREBEWhcAghTzjt+seJ1qscbsPuH0Chd57ZZ9p/IuWRaeobJUXCWLDQ2qigZg5GGMd3x26ub5rCfN7ru2aH482vVGpDO61WaupaqcRM3O2RQmTAHidw9PFQ742c/3ES83z6v4K36p01YA1z3SyUcJrKiYuy5254eWNDSzAGD0Kj9LTi+S3/X+JXV2mcOKkfj/qyTUMbjeYn7TgRgk4PkvhRE7a5oZuIpnDAB6Zc1U13vmB44Xy6R326cWtV1FwjcHMnNzlaWEdsBpDR8At915keP8AfA0XTjDqmXYcjZcHQjPqI9oJ9Tkrt8o+tpjtK4ehc6WcMDXbs9AQf2yvW2Bopri2eQbRAx0jifDaCevwBVIT+YHjq6B9P/TBq8RPaWuZ9cTkEHuOrl7nlVrdfam4xUFZHrG+tjtodV1BFfKffAgtDHZJ3Bxd1z4ZWjPauLHa8/Z0afFbNkrSvuVgukbRLqO/W21RNLpL1cTI9uO4kkL3E/g39T5Ke1HSCkpoqaNuGRMEbR5NHZRa5ZdOG462nv8AUQn3Nqpne6Jb0D5MAEf5d3zUrgMgHJXBsuPjDOX8pSf6hyxOorgjxWIbkRFMq+IiICIiAiIgIiICIiAiIgIiICIiDa44XwmeWt3E+fU9lyD5rodbGrbpa7OoHhs/0OXY7+V204P6rG3assqV6r1hX7zjWqLimzUtxt8TpC6qMFKWDq5oifEHfHuqrHAtJaMsB7tzhXD8J5LXqvWeo7PcoHOht9NUui3/AHPeCn6ehOH5HqFU5xP08dLcQ9S6cPT6su1XSADwayVwb+Sjdu6ppa1vul92pGO9MdfUQ8w5m4bRjJ6DJ6Z9V2Q0RqWXRVTxFpbdLJYKS4xWueqDciKokjdJG138ocGkZPTPTxXV9cjHf1VqHIdwEsXFPkT1rpS+0kT2a3r6tsUpaC6OaFjWwSjyLZGg/MKSiOUQqrha17snoOuPw64/ZTV5H9K08GmqrVbomCe41/0aN5PUsja0kD4uKhxebLX6ZvVfYLtEWVlrqpaOdrhgh8b3NcPmFJrlm4j0Vr0xprT8dfHBX0GsoQ6ndIN9TT1UTonbW98NcGuPwXBuNJyYJpCX2bLXHqYmyzvl/wBStsuopbHUACG6MGx3lK3JA+ILvkFJlvUBQx4S1Ed111Z2W6T3xFY0g7ujWhzs/k1TOaMNA8lp2XqjT9NvUs/1BStdX1R5mO7VERS6DEREBERAREQEREBERAREQEREBERBoc+C4tZA2pifTyx745Glrh5g9CuWtC0Huh39Iz3/AJQ7TPqie92m+1FDbppo6qalYSC+WMgsHQ9QQCCe+MDsFU1zvaYbpPmi4gW2KItifdPpLM9yJWB+f1+av0qwDH7sD7zgqXParWEWfmkqa+OMtZeLLQ1nb7x2viJ+ca1Y8VcXh05tVl1XHxZ54Q0ccuw3uegV23sw/dSco1hMYwPrK4h2P7X8Uqkdx2vzkDHXJ8FdT7K+d0vKjRxOyGw3uuY0HyLmn91uxtEq6/aJcOmcOua7V9NTwiOlvvub9TtaMN21DSX4/wDVbL+SwRonU02jtT2/U8FGypmtr3TQse4gCXY4Mf0/lcQ7HjhWCe2U0MKbUXDjiLDEM11JXWeocB1zE9ksefhNL/7VXDF3+KxyRE9pZY7zSeYWDcqnMKNRcwXCXQ9mr6qaASPF0qHZYKid9G8BmO52vfLnORnbjHdW3t7D8FQryAwOn5uOHAZnLbo958sCCQn8gr6h2C14cdcVemrLUZr6i/Xee7VERbWkREQEREBERAREQEREBERAREQEREBERB8Z2bsEeByqnvbHWI0/EzQGoXsDXVljqaXcPH3U4cB/8xVsyq59s9Wx/XXC+3Nx7xlJdZ3ee1z6Zo/MFCOys1zcv29932fn0V53s3NKHT/KVpF4mYXXd9XcnFoz9+ZzQD64YqMmlwma5uPskHr5L9DvKrppmkeXnh7YGQGE0+nqN72kYO+SMSOz65ecpHbwylGr2uunGXHlytN6Ld8lm1HTvDsfdbJHIx36j5KnlrjuHTwH6K832llhfeeUTV7mZLqGSkrOg7Bkzcn5Eqi8O2uIPfcV5LyErfZoW/6fzf6OJxinirqjt220kvX5kK8xUjeyzcf+13Yj7vINpuYz5fwe6u4HYLyCWqIiyeCIiAiIgIiICIiAiIgIiICIiAiIgIiICqg9syT/AEi8Oxn/AMCq/wA6lv8AsFa+qt/bOWaobd+GmoPd/wAF9JcqLd/fa+B4HycUFaMEQfMGPP2XdCv0u6Vhjh05ao4mhrGUFO1oA6ACNuAvzQN3ZO04OF+lXQdSavQ2nq5/ee00ch+MLT+6CLvtP+KFr0Hy03PTM8gNx1nO22UkAd9pzGkPleB5NaAPxc1UlEDdg9XEkk+uVLn2m/GOr4mcyVy0/BUudZ9FRizUkWct9+MPqJPxL3bfwjCiOxrXEZGcDOPNBYV7Irg1ebtxKvXGupaGWiwUMtppjnBlq59m4geIbGHZ9XBW3DsFHzkZ4Sf0Qct2k7BVU/ubjX0/1tX/AGcEzT4fg/g3a1SDHZBqiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgKEHta9E/wDEXLdR6ogh3z6Xv9PUOdt6tgmjkheM+W50R/yqb6wbzqadj1RyvcRrW+EyEWSWoYPEPixI0j5IPz84ILh2OD+i/SVoWYW7hnp6SpO36LYqR0gI7badpP6Ffm2c1zpXNacEjAx5kdAr7dX8VG2/kiruKFo31bhw/ZWQOiblxkfRtbkD0e7r5YKCj/jJqNureK+r9SsO5tzvdbVNPc7XTOx+PQD5rLnIZwAdx+4/Wa1XOmdJp2wuF4vBI+w+GJwLIT/5jw1uPLco5ufvcepJc45Oe5J7g/8A7xV2XswuCVLw35drdresoWsveuj9aTPe3D20oJbTM9BsG/8AzoJhwxNhYI4mBrGgBrQAAAOwAX1W1gw0DyW5AREQEREBERAREQEREBERAREQEREBERAREQF5zX+kqTXOjb3o6vqJYKa90E1DLJF9+NsjS0kevVejW1/3ThBSXrXkbtvDrXtwtV61VUXWgtVU4ljYAx1TAHZ2l2SBluWn1WbuNHOLpHidc+HvBrgXV1Vp0k2CSmv1uZSCGORpgLY6Rwx91pa8EdicH
wXUc5PFWs4Q8wOrrNW2cXC3XOaCth2SBroS+Fm8dfM9cepUHLTq6+WbiDTa4gtzaL6XM66U8IJ2vw5wc0Hx+0HAn1UbxqZnJFp7ekz1aOK4rUjvHlMbhHyE6H4wcUPo01xutts8QfV1kdHK1u5oP9XGXNOzJI7K2XSWnLZo/TNr0nZYBBb7NRw0FJEDn3cETAyNufHDWjr4nKhp7N691/Eil1ZxHqoPo1HG+C10VOBkg7S+WQn1OwDy2nzU4GAbei6NHXJTFEZPLk3G+LJnmcMfK3IiLqcIiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgLRwy0harR3YoKUvaMXeC7cxep4YnNdNQ1zKN49BBCQPx+0ox3mtbLZ9L0EYaZKGlqopMdwH1DiB+Z/NZY5pb0bpx211eqmJ8okv9c7a/v0lcGj8B3WEqSlbFXi63CGq+gzO2NmY3DN4GSwOP2SeoOM5TmYe889pXAeyvvFq/ojvelaZjRV0VbDXTuB+82aMhufw9yfmput8VWD7Iutr36115TNdIaJtopXEOPZwncGZA6A4L/krPmEFvTzI/NP5eNyIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgLZK0ujc1pwSO/kt6IID833s34uJtTcNfcHK+Cg1BcJHT11pq3BtNWPd958Tz/AFcnjh32SfEL1vLzyB6Z0ny6XPhDxgoqG912p6g3G5GN24UVSGBkZp5MZa9jQDvHdxPgpkuYHdwPRGtDewA/BBHTlC5QqLlVotT08OqPr+a/VUPuqh1IIHxUkLXCKJw3ODnZe4lwwCfAKRbAQ3BGFuRAREQEREBERAREQEREBERB/9k="
        
        ###string1=base64.encodestring(imagestring0)
        imagestring1=imagestring0

        #trainer.test2(imagestring0,imagestring1)
        trainer.test3( imagestring0, 128, 40)