Example No. 1
def main():
    config = get_config()
    # Fresh training run: refuse to overwrite existing output folders, then snapshot the config.
    if config['train'] and not config['resume']:
        for key in ['folder_log', 'folder_out']:
            if os.path.exists(config[key]):
                raise FileExistsError(config[key])
            os.makedirs(config[key])
        with open(os.path.join(config['folder_out'], 'config.yaml'), 'w') as f:
            yaml.safe_dump(config, f)
    data_loaders, image_shape = get_data_loaders(config)
    config['image_shape'] = image_shape
    net = get_model(config)
    if config['train']:
        train_model(config, data_loaders, net)
    test_model(config, data_loaders, net)
    return
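Examples 1, 3, and 5 all lean on a `get_config` helper that is not shown. As a minimal sketch of what it might look like, assuming a YAML config file and only the keys `main()` actually reads (the path and defaults here are illustrative, not the original API):

import yaml

def get_config(path='config.yaml'):
    # Hypothetical stand-in for the project's real get_config():
    # load a YAML file and fill in the keys main() relies on.
    with open(path) as f:
        config = yaml.safe_load(f)
    config.setdefault('train', True)
    config.setdefault('resume', False)
    config.setdefault('folder_log', './logs')
    config.setdefault('folder_out', './out')
    return config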
Example No. 2
def test_model(images_path, labels_path, model_path, remove_tmp_if_exist=True):
    '''Test a model whose graph was exported in pb format.'''
    # NOTE: machine-specific paths; adjust for your environment.
    workspace_path = '/home/osboxes/Desktop/ML/Final_Project/Workspace/test_model_data'
    output_path = '/home/osboxes/PycharmProjects/ML_final_project/workspace'
    if os.path.exists(workspace_path) and remove_tmp_if_exist:
        shutil.rmtree(workspace_path)

    dataset_utils.prepare_dataset(images_path,
                                  scale_image_size=(28, 28),
                                  normalize_values=False,
                                  change_to_gray=False,
                                  smooth_factor=1,
                                  in_memory=False,
                                  target_path=workspace_path)

    run_model.test_model(workspace_path, labels_path, model_path, output_path)
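A minimal invocation might look like the following; the paths are placeholders rather than the author's actual layout:

if __name__ == '__main__':
    # Hypothetical paths, for illustration only.
    test_model(images_path='./data/test_images',
               labels_path='./data/test_labels.txt',
               model_path='./models/frozen_graph.pb')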
Example No. 3
def main():
    # Enable on-demand GPU memory allocation instead of grabbing all VRAM up front.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
    config = get_config()
    if config['train'] and not config['resume']:
        for key in ['folder_log', 'folder_out']:
            if os.path.exists(config[key]):
                raise FileExistsError(config[key])
            os.makedirs(config[key])
        with open(os.path.join(config['folder_out'], 'config.yaml'), 'w') as f:
            yaml.safe_dump(config, f)
    # MirroredStrategy replicates the model across all visible GPUs for synchronous data-parallel training.
    strategy = tf.distribute.MirroredStrategy()
    data_loaders = get_data_loaders(strategy, config)
    net = get_model(config)
    if config['train']:
        train_model(strategy, config, data_loaders, net)
    test_model(strategy, config, data_loaders, net)
    return
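One caveat: `set_memory_growth` raises a `RuntimeError` when called after the GPUs have already been initialized, so a defensive variant of the loop above (a sketch, not the original code) wraps it in a try/except:

gpus = tf.config.experimental.list_physical_devices('GPU')
try:
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
    # Memory growth must be configured before any GPU is initialized.
    print(e)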
Example No. 4
def self_test_test(if_show_qual_rslt=False):
  """ Self test the model with toy data, testing part.
  """
  # Load and prepare fake data
  buckets, bucket_collapsed, bucket_sizes, train_tuple_b, word2id, \
      word_embeddings = fake_data_with_buckets()
  mydata = MyData(train_tuple_b, train_tuple_b, train_tuple_b,
                  word2id, word_embeddings)
  assert(mydata.train_buckets == buckets)
  assert(mydata.train_bucket_sizes == bucket_sizes)
  assert(mydata.valid_buckets == buckets)
  assert(mydata.valid_bucket_sizes == bucket_sizes)
  buckets_t = list(zip(*buckets))  # materialize: zip() returns a one-shot iterator on Python 3

  # Create model with vocabulary of 10, 2 small buckets
  config = SelfTestConfig()
  config.vocab_size = 10
  config.max_question_length = max(buckets_t[1])
  config.max_sentence_num = max(buckets_t[0])
  config.max_sentence_length = max(buckets_t[2])
  config.data_type = data_type()
  config.batch_size = 2
  config.init_scale = np.sqrt(6.0 / (config.word_embed_size + config.rnn_hidden_size))
  with tf.Session() as session:
    # Set random seed to fixed number
    tf.set_random_seed(FLAGS.tf_random_seed)
    random.seed(FLAGS.random_seed)
    np.random.seed(FLAGS.random_seed)

    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)
    embedding_initializer = tf.constant_initializer(word_embeddings,
                                                    dtype=config.data_type)
    with tf.variable_scope("model", reuse=False, initializer=initializer):
      m_test = MIL_AnswerTrigger(config=config, buckets=buckets,
                                 is_training=False,
                                 embedding_initializer=embedding_initializer)
    test_model(FLAGS, session, m_test, config, mydata, if_test=True,
               if_show_qual_rslt=if_show_qual_rslt, if_load_ckpt=True)
Example No. 5
def main():
    config = get_config()
    if config['train'] and not config['resume']:
        for key in ['folder_log', 'folder_out']:
            if os.path.exists(config[key]):
                raise FileExistsError(config[key])
            os.makedirs(config[key])
        with open(os.path.join(config['folder_out'], 'config.yaml'), 'w') as f:
            yaml.safe_dump(config, f)
    data_loaders, image_shape = get_data_loaders(config)
    config['image_shape'] = image_shape
    # Default crop: keep the channel dimension (index 0) and halve each spatial dimension.
    if 'crop_shape' not in config:
        config['crop_shape'] = [
            val if idx == 0 else val // 2
            for idx, val in enumerate(image_shape)
        ]
    net = get_model(config)
    # A second model instance is constructed only when starting from a pretrained checkpoint.
    net_gen = None if config['path_pretrain'] is None else get_model(config)
    if config['train']:
        train_model(config, data_loaders, net, net_gen)
    test_model(config, data_loaders, net)
    return
Example No. 6
def test(if_show_qual_rslt=False):
  """Test the model."""
  buckets = BUCKETS
  train_tuple_b, valid_tuple_b, test_tuple_b, word2id, word_embeddings = \
      prep_data()
  mydata = MyData(train_tuple_b, valid_tuple_b, test_tuple_b, word2id,
                  word_embeddings)
  assert(mydata.test_buckets == buckets)
  buckets_t = list(zip(*buckets))  # materialize: zip() returns a one-shot iterator on Python 3

  # Create model
  config = get_config()
  config.vocab_size = len(word2id)
  config.max_sentence_num = max(buckets_t[0])
  config.max_question_length = max(buckets_t[1])
  config.max_sentence_length = max(buckets_t[2])
  config.data_type = data_type()
  config.batch_size = 1
  config.init_scale = np.sqrt(6.0 / (config.word_embed_size + config.rnn_hidden_size))
  print_config(config)

  with tf.Session() as session:
    # Set random seed to fixed number
    tf.set_random_seed(FLAGS.tf_random_seed)
    random.seed(FLAGS.random_seed)
    np.random.seed(FLAGS.random_seed)

    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)
    embedding_initializer = tf.constant_initializer(word_embeddings,
                                                    dtype=config.data_type)
    with tf.variable_scope("model", reuse=None, initializer=initializer):
      m = MIL_AnswerTrigger(config=config, buckets=buckets,
                            is_training=False,
                            embedding_initializer=embedding_initializer)
    test_model(FLAGS, session, m, config, mydata, if_test=True,
               if_show_qual_rslt=if_show_qual_rslt, if_load_ckpt=True)
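Examples 4 and 6 are written against the TensorFlow 1.x API (`tf.Session`, `tf.variable_scope`, `tf.set_random_seed`). To run them under TensorFlow 2 they would need the compatibility shim, roughly:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores graph mode, sessions, and variable scopes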
Example No. 7
def main():

    parser = argparse.ArgumentParser(description='Testing model ... ')
    # modify here accordingly ...
    id_process = os.getpid()
    time_current = datetime.datetime.now().isoformat()
    #
    #
    parser.add_argument('-fd',
                        '--FileData',
                        required=False,
                        help='Path of the dataset')
    #
    parser.add_argument('-fp',
                        '--FilePretrain',
                        required=True,
                        help='File of pretrained model')
    parser.add_argument('-mt', '--MapTest', required=False, help='Test Map')
    #parser.add_argument(
    #    '-sr', '--SaveResults', required=False,
    #    help='Save results ? True or False'
    #)
    parser.add_argument('--saveresults',
                        dest='saveresults',
                        action='store_true')
    parser.add_argument('--no-saveresults',
                        dest='saveresults',
                        action='store_false')
    parser.set_defaults(saveresults=False)
    #
    args = parser.parse_args()
    #
    #print "args.saveresults : ", args.saveresults
    #
    # args.FileData may be None; downstream code is expected to fall back to its default data path.
    assert args.FilePretrain is not None
    args.PathPretrain = os.path.abspath(args.FilePretrain)  # fixed: the original read args.PathPretrain before it existed
    if args.MapTest is None:
        args.MapTest = 'l'
    else:
        args.MapTest = str(args.MapTest)
    if not args.saveresults:
        file_save = None
    else:
        pretrain = args.FilePretrain
        tag_save = pretrain.split('track_')[1].split('/model.pkl')[0]
        file_save = './results/result_' + tag_save + '.pkl'
    #
    print("PID is : %s" % str(id_process))
    print("TIME is : %s" % time_current)
    #
    print("FileData is : %s" % args.FileData)
    print("FilePretrain is : %s" % args.FilePretrain)
    print("MapTest is : %s" % str(args.MapTest))
    print("Save Results ? : %s" % file_save)
    #
    dict_args = {
        'PID': id_process,
        'TIME': time_current,
        'FileData': args.FileData,
        'FilePretrain': args.FilePretrain,
        'MapTest': args.MapTest
    }
    #
    input_tester = {
        'path_rawdata': args.FileData,
        'path_model': args.FilePretrain,
        'args': dict_args,
        'map_test': args.MapTest,
        'file_save': file_save
    }
    #
    run_model.test_model(input_tester)
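Assuming the script is saved as `test.py` (a name chosen here for illustration), a run that saves results could be invoked as:

    python test.py --FilePretrain ./results/track_xyz/model.pkl --saveresults

Note that `--saveresults` only works when the pretrained path contains `track_.../model.pkl`, since the result tag is sliced out of that path.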
Example No. 8
def main():
    args = get_arguments()
    # dataloaders, args.image_planes, args.image_full_height, args.image_full_width = get_dataloaders(args)
    dataloaders, image_shapes = get_data_loaders(args)
    args.image_planes, args.image_full_height, args.image_full_width = image_shapes
    args.image_crop_height = args.image_full_height // 2
    args.image_crop_width = args.image_full_width // 2
    if args.train:
        if not os.path.exists(args.folder):
            os.mkdir(args.folder)
        exclude_keys = [
            'path_config',
            'folder',
            'train',
            'file_args',
            'file_log',
            'file_model',
            'file_result_base',
        ]
        args_dict = {
            key: val
            for key, val in args.__dict__.items() if key not in exclude_keys
        }
        with open(os.path.join(args.folder, args.file_args), 'w') as f:
            yaml.safe_dump(args_dict, f)
        net = get_model(args).cuda()
        train_model(args, net, dataloaders)
    else:
        net = get_model(args, os.path.join(args.folder,
                                           args.file_model)).cuda()
        test_model(args, net, dataloaders)
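This example is PyTorch-style code (note the `.cuda()` calls). A device-agnostic variant of the model setup, sketched under the assumption that `get_model` returns a standard `nn.Module`:

import torch

# Fall back to CPU when no GPU is available, instead of hard-coding .cuda().
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = get_model(args).to(device)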
Example No. 9
map_sequence = ['grid', 'l', 'jelly']

if __name__ == '__main__':
    print('Start Map tester ...')

    # declare the parser
    parser = argparse.ArgumentParser(description='test the hong yuan model')

    # add the argument
    # parser.add_argument('-log_file', help='log file content', required = True)
    # parser.add_argument('-save_file_path', help='save file path', required = True)
    parser.add_argument('-model_path', help='model path', required=True)
    parser.add_argument('-seed', help='random seed', required=True)
    parser.add_argument('-test_map', help='map_for_test', required=True)
    parser.add_argument('-file_save', help='file save path', required=True)

    # parse the argument
    args = parser.parse_args()

    # input_trainer['log_file'] = args.log_file
    # input_trainer['save_file_path'] = args.save_file_path
    input_tester = {}  # must be initialized first; the original assigned into it without defining it
    input_tester['seed'] = int(args.seed)
    input_tester['map_test'] = map_sequence[int(args.test_map)]
    input_tester['path_model'] = [args.model_path]
    input_tester['file_save'] = args.file_save
    # input_trainer['map_train'] = [map_sequence[idx] for idx in range(3) if idx != int(args.test_map)]

    print(input_tester)
    run_model.test_model(input_tester)
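Note that `-test_map` is a numeric index into `map_sequence`, not a map name. A hypothetical invocation (script name assumed):

    python map_tester.py -model_path ./models/model.pkl -seed 42 -test_map 1 -file_save ./results/out.pkl

Here `-test_map 1` selects `'l'` from `map_sequence`.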
Example No. 10
def main():
    parser = argparse.ArgumentParser(description='Testing model ... ')

    id_process = os.getpid()
    time_current = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    tag_model = '_PID=' + str(id_process) + '_TIME=' + time_current
    path_track = os.path.abspath('./testing/track' + tag_model + '/')

    if not os.path.exists(path_track):
        os.makedirs(path_track)

    file_log = os.path.join(path_track, 'log.txt')
    file_results = os.path.join(path_track, 'results.txt')

    #
    # The directory from which to find the data. default in data_processers is './data/'
    parser.add_argument('-fd',
                        '--FileData',
                        required=False,
                        help='Path of the data-set')
    #
    parser.add_argument(
        '-fp',
        '--FilePretrain',
        required=True,
        help='File of pretrained model')
    parser.add_argument('-mt',
                        '--MapTest',
                        required=True,
                        choices=['map_1', 'map_2', 'map_3'],
                        type=str,
                        help='Test Map')

    parser.add_argument('-sr',
                        '--SaveResults',
                        required=False,
                        action='store_true',
                        help='Save Results (True/False, default is false)')

    parser.add_argument('-sb',
                        '--SizeBeam',
                        required=False,
                        choices=range(1, 20),
                        default=4,
                        type=int,
                        help='Size of Beam (Integer, default is 4)')

    parser.add_argument(
        '-lnf',
        '--LengthNormalizationFactor',
        required=False,
        default=0.5,
        type=float,  # without type=float a user-supplied value arrives as a string and the assertion below fails
        help='Length Normalization Factor [0.5-0.7] (0.5 is the default)')

    args = parser.parse_args()
    args.id_process = id_process
    args.time_current = time_current

    assert (args.FilePretrain is not None)
    args.PathPretrain = os.path.abspath(args.FilePretrain)

    args.MapTest = str(args.MapTest)

    assert isinstance(args.LengthNormalizationFactor, float)
    assert 0.5 <= args.LengthNormalizationFactor <= 0.7  # according to 'Google's neural machine translation system: Bridging the gap between human and machine translation'

    assert isinstance(args.SizeBeam, int), "Size of Beam is not an int"

    args.SaveResultsPath = file_results
    create_logger(file_results, 'testing results log')
    logger = create_logger(file_log, 'testing log')
    logger.info(args)

    run_model.test_model(args)
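For illustration (the script name is assumed), a typical run honoring the argument constraints above:

    python test_model.py --FilePretrain ./models/model.pkl --MapTest map_1 --SizeBeam 4 --LengthNormalizationFactor 0.6 --SaveResults

`--LengthNormalizationFactor` must land in [0.5, 0.7], the range the assertions enforce following the GNMT paper cited in the comment.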