Example #1
0
 def test_parameters_are_not_none(self):
     """All required CLI parameters must have non-None values."""
     # Parse once instead of three times; the result is the same namespace.
     args = parse_args()
     self.assertIsNotNone(args.addr,
                          'IP адрес не может быть None или Null')
     self.assertIsNotNone(args.port,
                          'Номер порта не может быть None или Null')
     self.assertIsNotNone(args.user,
                          'Имя пользователя не может быть None или Null')
Example #2
0
def _input_validator(args):
    """Assert that parse_args rejects *args* via a SystemExit caused by ArgumentError."""
    exited = False
    try:
        parse_args(args)
    except SystemExit as exc:
        exited = True
        cause = exc.__context__
        assert isinstance(cause, argparse.ArgumentError)
        assert 'Value has to be greater than 0' in str(cause)
    if not exited:
        raise ValueError("Exception not raised")
Example #3
0
 def test_single_arg(self):
     """A single keyword changes exactly one entry of the default arguments."""
     for raw, key, value in (('join', arg.JOIN, True),
                             ('scalecorner=3', arg.SCALE_CORNER, 3)):
         expected = get_default_args()
         expected[key] = value
         self.assertEqual(expected, parse_args(raw))
Example #4
0
    def test_six_files(self):
        """argparse must open each of six generated filenames, in order, read-only."""
        filenames = [self.__random_filename() for _ in range(6)]
        opened = []

        # Record every file argparse tries to open and validate the call.
        def fake_open(name, mode, *args, **kwargs):
            self.assertIn(name, filenames)
            self.assertEqual(mode, "r")
            opened.append(name)
            return unittest.mock.MagicMock()

        main.argparse.open = fake_open
        main.parse_args(filenames)
        self.assertEqual(opened, filenames)
Example #5
0
def grid_search(config: dict):
    """Exhaustively search the hyper-parameter grid described by *config*.

    For every dataset, tries each combination of architecture, hidden size,
    pool ratio, learning rate and weight decay, keeps the best accuracy,
    and dumps the winning parameters to ./output/<dataset>.log as JSON.
    """
    args = parse_args()

    for d in config["dataset"]:
        args.dataset = d
        best_acc, err_bd = 0., 0.
        # Snapshot the namespace: vars(args) aliases args.__dict__, so without
        # deepcopy any later mutation of args would silently corrupt the
        # recorded "best" parameters (e.g. when no combination beats 0 acc).
        best_args = deepcopy(vars(args))
        for arch in config["arch"]:
            args.architecture = arch
            for hidden in config["hidden"]:
                args.hid_dim = hidden
                for pool_ratio in config["pool_ratio"]:
                    args.pool_ratio = pool_ratio
                    for lr in config["lr"]:
                        args.lr = lr
                        for weight_decay in config["weight_decay"]:
                            args.weight_decay = weight_decay
                            acc, bd = run_experiments(args)
                            if acc > best_acc:
                                best_acc = acc
                                err_bd = bd
                                best_args = deepcopy(vars(args))
        args.output_path = "./output"
        if not os.path.exists(args.output_path):
            os.makedirs(args.output_path)
        args.output_path = "./output/{}.log".format(d)
        result = {
            "params": best_args,
            "result": "{:.4f}({:.4f})".format(best_acc, err_bd)
        }
        with open(args.output_path, "w") as f:
            json.dump(result, f, sort_keys=True, indent=4)
Example #6
0
def setup(opts):
    """Build a portrait-dataset UGATIT model restored from the latest checkpoint."""
    args = parse_args()
    args.dataset = 'portrait'
    model = UGATIT(sess, args)
    model.build_model()
    model.load_from_latest(opts['checkpoint'])
    return model
Example #7
0
def setup():
    """Parse CLI args, seed all RNGs, prepare output dirs, and return the namespace."""
    args = parse_args()

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    data = importlib.import_module('{}_data'.format(cfg.DATASET_NAME))

    # Seed every RNG; draw a random seed when none was supplied.
    if args.manualSeed is None:
        args.manualSeed = random.randint(1, 10000)
    random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    if cfg.CUDA:
        torch.cuda.manual_seed_all(args.manualSeed)

    # Output layout: <output_dir>/<dataset>_<config>/Test/
    output_dir = os.path.join(args.output_dir,
                              '%s_%s/' % (cfg.DATASET_NAME, cfg.CONFIG_NAME))
    test_sample_save_dir = output_dir + 'Test/'
    if not os.path.exists(test_sample_save_dir):
        os.makedirs(test_sample_save_dir)

    # Stash derived values on the namespace for downstream consumers.
    args.data = data
    args.num_gpu = len(cfg.GPU_ID.split(','))
    args.output_dir = output_dir
    args.test_sample_save_dir = test_sample_save_dir

    return args
Example #8
0
 def test_exception_is_not_raised_if_all_required_flags_are_provided(
         self, stderr):
     """Parsing succeeds and maps the four positionals onto host/port pairs."""
     args = parse_args(['localhost', '1337', 'localhost', '1338'])
     expectations = {
         'hostname': 'localhost',
         'port': 1337,
         'app-port': 1338,
         'app-hostname': 'localhost',
     }
     for attr, expected in expectations.items():
         self.assertEqual(getattr(args, attr), expected)
Example #9
0
def test_argument_parser():
    """parse_args must echo back the chosen player names and trial count."""
    first = random.choice(characters)
    second = random.choice(characters)
    parsed = parse_args(
        ["--first", first, "--second", second, "--trials", "100"])
    assert parsed.first == first
    assert parsed.second == second
    assert parsed.trials == 100
Example #10
0
def setup(opts):
    """Restore a 256px, test-phase UGATIT model from the latest checkpoint."""
    args = parse_args()
    args.phase = 'test'
    args.img_size = 256
    model = UGATIT(sess, args)
    model.build_model()
    model.load_from_latest(opts['checkpoint'])
    return model
Example #11
0
    def test_ignore_oval_licence(self):
        """--ignore with a single oval licence yields a one-item ignore list."""
        oval = RaceType.Oval.value
        self.params += ['--ignore', str(oval)]

        ignore_list = main.parse_args(self.params).ignore

        assert_equal(len(ignore_list), 1)
        assert_true(oval in ignore_list)
Example #12
0
    def test_parse_args_options(self):
        """Short options must override every default."""
        args = main.parse_args(
            ['-t', 'test_table', '-d', 'sqlite://', '-b', '120', '-a', '-v',
             'TestData'])

        for attr, value in (('fname', 'TestData'),
                            ('table_name', 'test_table'),
                            ('database', 'sqlite://'),
                            ('batch', 120)):
            self.assertEqual(getattr(args, attr), value)
        self.assertTrue(args.verbose)
        self.assertTrue(args.append)
Example #13
0
    def test_parse_args_defaults(self):
        """With only a filename given, every option keeps its default."""
        args = main.parse_args(['TestData'])

        self.assertIsNone(args.table_name)
        for attr, default in (('fname', 'TestData'),
                              ('database', 'sqlite:///TestData.db'),
                              ('batch', 120000)):
            self.assertEqual(getattr(args, attr), default)
        self.assertFalse(args.verbose)
        self.assertFalse(args.append)
Example #14
0
    def test_parse_args_defaults(self):
        """Defaults: no table name, derived sqlite URL, batch 120000, flags off."""
        parsed = main.parse_args(['TestData'])

        self.assertEqual(parsed.fname, 'TestData')
        self.assertIsNone(parsed.table_name)
        self.assertEqual(parsed.database, 'sqlite:///TestData.db')
        self.assertEqual(parsed.batch, 120000)
        self.assertFalse(parsed.verbose)
        self.assertFalse(parsed.append)
Example #15
0
 def test_basic_call(self):
     """Fuzz parse_args with radamsa-mutated input; surviving inputs are logged.

     Any mutated argument string that parse_args accepts (i.e. does NOT
     raise ValueError) is appended to a temp file for manual review, then
     the file is shown sorted/deduplicated and the reviewer must approve.
     """
     with NamedTemporaryFile(delete=False) as fw:
         for i in range(10000):
             p = Popen(['radamsa'], stdout=PIPE, stdin=PIPE, stderr=PIPE)
             try:
                 test_args = p.communicate(input='h_pages=2'.encode())[0]
             except Exception:
                 # Narrowed from a bare `except:` so Ctrl-C / SystemExit can
                 # still abort the 10k-iteration fuzz loop.
                 pass
             else:
                 try:
                     with self.assertRaises(ValueError):
                         parse_args(test_args.decode())
                 except AssertionError:
                     # parse_args accepted a mutated input -- record it.
                     fw.write(test_args + '\n'.encode())
             finally:
                 p.terminate()
     call('sort -u {}'.format(fw.name), shell=True)
     unlink(fw.name)
     ret = input('Looking good? [N, y] ')
     self.assertEqual(ret.lower().strip(), 'y')
Example #16
0
def do(cmd_args):
    """Run the experiment described by *cmd_args* and summarize its rewards."""
    rewards = run(parse_args(cmd_args))
    return {
        "args": cmd_args,
        "avg_reward": np.average(rewards),
        "std_reward": np.std(rewards),
        "rewards": rewards,
    }
Example #17
0
 def test_wrong_keyword(self):
     """Unknown or malformed keywords must raise ValueError."""
     for bad_input in ('wfawr', 'joi n', 'scale=20.2, widht=10'):
         with self.assertRaises(ValueError):
             parse_args(bad_input)
Example #18
0
    def test_parse_args_options(self):
        """All short flags are honoured when supplied together."""
        parsed = main.parse_args([
            '-t', 'test_table', '-d', 'sqlite://', '-b', '120', '-a', '-v',
            'TestData'
        ])

        self.assertEqual(
            (parsed.fname, parsed.table_name, parsed.database, parsed.batch),
            ('TestData', 'test_table', 'sqlite://', 120))
        self.assertTrue(parsed.verbose)
        self.assertTrue(parsed.append)
Example #19
0
    def test_ignore_dirt_licences(self):
        """Two --ignore values produce exactly those two entries, in order."""
        dirt = [RaceType.Dirt_Oval.value, RaceType.Dirt_Road.value]
        self.params += ['--ignore'] + [str(v) for v in dirt]

        ignore_list = main.parse_args(self.params).ignore

        assert_equal(len(ignore_list), 2)
        assert_list_equal(dirt, ignore_list)
    def test_trans_coverage(self):
        """trans_coverage finds entries in tests/, and print_out matches sample.md."""
        args = main.parse_args(["--dir=tests"])
        extensions = tuple(args.ext.split())

        entry_count, name_count = main.trans_coverage(-1, args, "tests",
                                                      extensions)
        print("test dir: ", entry_count, name_count)
        self.assertNotEqual(0, entry_count)
        self.assertNotEqual(0, name_count)

        # Generate/update sample file
        # `python main.py --dir tests > sample.md`
        with open("sample.md", "r") as sample:
            expected_lines = sample.read().splitlines()
            actual_lines = main.print_out(args).splitlines()
            self.assertEqual(expected_lines, actual_lines)
Example #21
0
 def test_all_arguments(self):
     """A comma-separated string setting every option parses into the full dict."""
     raw = ('h_pages=3, v_pages=2, north=4, nx=4, ny=6, title=AO QESHM, scale=2,'
            + 'scalecorner=1, join, width = 9.0, keypad=2')
     expected = {
         arg.H_PAGES: 3,
         arg.V_PAGES: 2,
         arg.NORTH: 4,
         arg.KEYPAD: 2,
         arg.NX: 4,
         arg.NY: 6,
         arg.TITLE: 'AO QESHM',
         arg.SCALE_CORNER: 1,
         arg.SCALE: 2.0,
         arg.JOIN: True,
         arg.WIDTH: 9.0
     }
     self.assertEqual(expected, parse_args(raw))
Example #22
0
 def __init__(self, config, env_creator):
     """Build a Rainbow worker: parse agent args from *config* and wrap the env.

     Note: the previous version also ran ``self.config.update(config)``
     immediately after ``self.config = config`` -- updating the dict with
     itself is always a no-op, so that line was removed.
     """
     self.config = config
     self.args = parse_args([
         "--multi-step={}".format(self.config["n_step"]),
         "--discount={}".format(self.config["gamma"]),
         "--lr={}".format(self.config["lr"]),
         "--game={}".format(self.config["env"]),
     ])
     # DeepMind-style preprocessing: frame stacking plus pixel scaling.
     self.env = wrap_deepmind(env_creator(self.config["env_config"]),
                              frame_stack=True,
                              scale=True)
     self.action_space = self.env.action_space.n
     self.agent = Agent(self.args, self.action_space)
     self.state = to_rainbow(self.env.reset())
     # Per-worker bookkeeping for episode statistics.
     self.local_timestep = 0
     self.episode_rewards = [0.0]
     self.episode_lengths = [0.0]
     self.lock = Lock()
Example #23
0
    def init_model(self):
        """Create a selfie2anime UGATIT generator in test mode; return (gan, sess)."""
        args = parse_args()
        if args is None:
            exit()
        args.phase = 'test'
        args.dataset = 'selfie2anime'
        # args.light = 'True'
        args.img_size = 256

        # open session
        session = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        gan = UGATIT(session, args)
        # build graph
        gan.build_model()

        # show network architecture
        # show_all_variables()
        gan.init_model(session)

        return gan, session
Example #24
0
def indicator_data_save():
    """Compute constraint-indicator scores for every layout pair and save them.

    For each sample n in the dataset, scores every pair i with
    indicator.calculate_score and writes the per-pair values to
    layout_<n>_indicator_value.npy under the indicator output directory.
    """
    from dataset.datasets import LayoutDataset
    # config cfg file
    args = parse_args()
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    indicator_path = "/home/zhoujiaqiu/Code/GAN/LayoutGenerator/dataset/data" \
                     "/indicator_train_data/constraint_inter"
    dataset = LayoutDataset(cfg.DATA_DIR, cfg.INDICATOR_DIR, train_set=True)

    # NOTE(review): the instance itself is never used below; kept in case the
    # constructor has side effects -- confirm and drop if it has none.
    convert_layout = ConversionLayout()
    constraint_rules = [1, 2, 3, 4, 5]
    for n in range(len(dataset.pair_room_datas)):
        pair_data = dataset.pair_room_datas[n]
        init_contour = dataset.pair_init_contours[n]
        indicator_hull = dataset.indicator_hull[n]
        indicator_values = np.zeros((len(pair_data), 1))
        for i in range(len(pair_data)):
            # data: [1, 2500, 2, 4, 3] init_contour: [2500, 2, n, 3]
            # indicator_hull: [1, 2500, 2, 5, n, 2]
            # Only the first layout's initial contour feeds the score; the
            # room tensors and the second contour were unused and are dropped.
            layout1_init_contour = init_contour[i][0]
            layout1_hull, layout2_hull = indicator_hull[i][0], indicator_hull[
                i][1]
            indicator_value = indicator.calculate_score(
                layout1_init_contour, layout1_hull, layout2_hull,
                constraint_rules)
            indicator_values[i] = indicator_value
            print(
                "on {} layouts, the {} indicator_value save finished!".format(
                    n, i))

            # /****** visualization ******/
            # from trainer.trainer_evaluator import visualization
            # save_path = "/home/zhoujiaqiu/Code/GAN/ours_layout_generator/dataset/visualization/{}.png".format(n)
            # visual_hull = [layout1_hull, layout2_hull]
            # visualization(visual_hull, [indicator_value, indicator_value], save_path)
        save_indicator_path = os.path.join(
            indicator_path, "layout_{}_indicator_value.npy".format(n))
        np.save(save_indicator_path, indicator_values)
Example #25
0
def serve(args=None):
    """Load the trained DDQN agent and serve predictions over WSGI on port 8000."""
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # Environment with historical stock data; sanity-check the prediction date.
    env = Environment(args)
    env.GetStockDataVecFN("SomeData.csv", False)
    env.check_data_integrity4prediction_at("2018-09-03 10:05:00")
    agent = Agent(env.get_state_size(),
                  env.get_action_size(),
                  2.5e-4,  # lr
                  1e-2,  # env.tau
                  args.dueling,
                  args.hidden_dim)
    agent.loadModel(r"D:\PycharmProjects\DDQN_tester\model_1")

    from wsgiref.simple_server import make_server

    wsgi_app = WsgiApplication(application)
    server = make_server('0.0.0.0', 8000, wsgi_app)
    server.serve_forever()
def main():
    """Evaluate a trained Seq2Seq ASR model on the test split.

    Depending on args.test_mode this (a) generates transcripts, (b) computes
    character error rates against the reference ys, and/or (c) computes
    per-utterance perplexities.  All artifacts land in args.save_directory.
    """
    args = parse_args()

    t0 = time.time()

    # Ensure the run directory exists and start with an empty 'log' file.
    if not os.path.exists(args.save_directory):
        os.makedirs(args.save_directory)
    LOG_PATH = os.path.join(args.save_directory, 'log')
    with open(LOG_PATH, 'w+') as ouf:
        pass

    print("Loading File IDs and Y Data")
    train_ids, train_ys = load_fid_and_y_data('train')
    dev_ids, dev_ys = load_fid_and_y_data('dev')
    test_ids, test_ys = load_fid_and_y_data('test')
    t1 = time.time()
    print_log('%.2f Seconds' % (t1 - t0), LOG_PATH)

    print("Building Charset")
    # Charset is built over ALL splits so train/dev/test share one vocabulary.
    charset = build_charset(np.concatenate((train_ys, dev_ys, test_ys),
                                           axis=0))
    charmap = make_charmap(charset)  # {string: int}
    charcount = len(charset)
    t1 = time.time()
    print_log('%.2f Seconds' % (t1 - t0), LOG_PATH)

    print("Mapping Characters")
    testchars = map_characters(test_ys, charmap)
    print("Building Loader")
    # Evaluation loader: fixed order (shuffle=False) over the test split.
    test_loader = make_loader(test_ids,
                              testchars,
                              args,
                              shuffle=False,
                              batch_size=args.batch_size)

    # The model is only needed for transcript generation and perplexity;
    # CER alone works purely from the CSV on disk.
    if 'transcript' in args.test_mode or 'perp' in args.test_mode:
        print("Building Model")
        model = Seq2SeqModel(args, vocab_size=charcount)

        CKPT_PATH = os.path.join(args.save_directory, 'model.ckpt')
        if torch.cuda.is_available():
            model.load_state_dict(torch.load(CKPT_PATH))
        else:
            # Checkpoint was saved on GPU; remap every tensor onto the CPU.
            gpu_dict = torch.load(CKPT_PATH,
                                  map_location=lambda storage, loc: storage)
            cpu_model_dict = {}
            for key, val in gpu_dict.items():
                cpu_model_dict[key] = val.cpu()
            model.load_state_dict(cpu_model_dict)
        print("Loaded Checkpoint")

        if torch.cuda.is_available():
            model = model.cuda(args.cuda)

        model.eval()

    transcript_log_path = os.path.join(args.save_directory,
                                       'transcript_log.txt')
    csv_path = os.path.join(args.save_directory, 'submission.csv')

    if 'transcript' in args.test_mode:
        print('generating transcripts')
        with open(transcript_log_path, 'w+') as ouf:
            pass
        # Reuse an existing submission.csv instead of re-decoding from scratch.
        if not os.path.exists(csv_path):
            transcripts = write_transcripts(path=csv_path,
                                            args=args,
                                            model=model,
                                            loader=test_loader,
                                            charset=charset,
                                            log_path=transcript_log_path)
        else:
            transcripts = []
            with open(csv_path, 'r') as csvfile:
                raw_csv = csv.reader(csvfile)
                for row in raw_csv:
                    with open(transcript_log_path, 'a') as ouf:
                        ouf.write('%s\n' % row[1])
                    transcripts.append(row[1])
        t1 = time.time()
        print("Finshed Writing Transcripts")
        print('%.2f Seconds' % (t1 - t0))

    if 'cer' in args.test_mode:
        print('calculating cer values')
        cer_log_path = os.path.join(args.save_directory, 'cer_log.txt')
        with open(cer_log_path, 'w+') as ouf:
            pass
        # CER always re-reads transcripts from the CSV on disk (column 1).
        transcripts = []
        with open(csv_path, 'r') as csvfile:
            raw_csv = csv.reader(csvfile)
            for row in raw_csv:
                transcripts.append(row[1])
        transcripts = [l.strip() for l in transcripts]
        cer_path = os.path.join(args.save_directory, 'test_cer.npy')
        dist_path = os.path.join(args.save_directory, 'test_dist.npy')
        norm_dists, dists = cer_from_transcripts(transcripts, test_ys,
                                                 cer_log_path)
        np.save(cer_path, norm_dists)
        np.save(dist_path, dists)
        print('avg CER:', np.mean(norm_dists))

    if 'perp' in args.test_mode:
        print('calculating perp values')
        PERP_LOG_PATH = os.path.join(args.save_directory, 'perp_log.txt')
        with open(PERP_LOG_PATH, 'w+') as ouf:
            pass
        perp_path = os.path.join(args.save_directory, 'test_perp.npy')
        all_perps = perplexities_from_x(model, test_loader)
        np.save(perp_path, all_perps)
 def test_parameters_are_not_none(self):
     """The address (a) and port (p) arguments must not be None."""
     for attr, message in (('a', 'IP адрес не может быть None или Null'),
                           ('p', 'Номер порта не может быть None или Null')):
         self.assertNotEqual(getattr(parse_args(), attr), None, message)
 def test_number_parameters(self):
     """parse_args must expose exactly two attributes.

     NOTE(review): the Russian failure message mentions *three* required
     parameters while the assertion checks for two -- confirm which is
     intended; the runtime message text is kept unchanged here.  The dead
     trailing ``pass`` statement was removed.
     """
     self.assertEqual(len(parse_args().__dict__), 2,
                      'Нет необходимых трёх параметров для запуска')
 def test_parse_args_short_arg(self):
     """The -p short flag maps onto the password attribute."""
     parsed = main.parse_args(["-p", password])
     self.assertEqual(parsed.password, password)
 def test_parse_args_with_invalid_args(self):
     """An unknown flag makes argparse exit via SystemExit."""
     with self.assertRaises(SystemExit):
         main.parse_args(["-x", "y"])
 def test_parse_args_long_arg(self):
     """The --password long flag maps onto the password attribute."""
     parsed = main.parse_args(["--password", password])
     self.assertEqual(parsed.password, password)
Example #32
0
 def test_empty(self):
     """Blank or whitespace-only input yields the default arguments."""
     for blank in ('', '  ', '\n'):
         self.assertEqual(parse_args(blank), get_default_args())
Example #33
0
 def test_parse_add(self):
     """'add' builds the expected namespace; a malformed number raises ArgumentError."""
     parsed = main.parse_args(
         ['add', 'tom', '123-456-7890', '-b', 'phonebook'])
     expected = {
         'func': main.add,
         'name': 'tom',
         'phonebook': ['phonebook'],
         'number': '123-456-7890',
     }
     self.assertEqual(vars(parsed), expected)
     self.assertRaises(argparse.ArgumentError, main.parse_args,
                       ['add', 'tom', '123123-456-7890', '-b', 'phonebook'])