Example #1
def main():
    test_false = [-3, 43, 10000, pow(3, 20)]
    test_true = [0, 4, 64, 1024, pow(2, 20)]
    for n in test_false:
        test(is_power_of_two(n), False)

    for n in test_true:
        test(is_power_of_two(n), True)
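
The helpers is_power_of_two and test are not part of the snippet. A minimal sketch of what they might look like, inferred only from the calls above (the real implementations may differ; note that the test data treats 0 as a power of two):

def is_power_of_two(n):
    # Assumption: a non-negative n is a power of two iff it has at most one set bit;
    # 0 is accepted because it appears in test_true above.
    return n >= 0 and (n & (n - 1)) == 0


def test(actual, expected):
    # Hypothetical assertion helper assumed by all of these examples.
    print('PASS' if actual == expected else 'FAIL: {!r} != {!r}'.format(actual, expected))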
Example #2
import random


def main():
    k = 4
    n = 1000
    arr = random.sample(range(n), n // 3)  # sample size must be an integer in Python 3
    sort = sorted(arr, reverse=True)
    print('test result...')
    for i in range(k):
        test(find_kth(arr, 0, len(arr) - 1, i), sort[i])
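
find_kth is compared against the i-th element of the descending-sorted copy, so it apparently returns the k-th largest element (0-indexed). A quickselect sketch under that assumption; it partitions arr in place, which the loop above tolerates:

def find_kth(arr, lo, hi, k):
    # Lomuto partition in descending order: elements greater than the pivot
    # move to the front, so index k ends up holding the k-th largest value.
    pivot = arr[hi]
    i = lo
    for j in range(lo, hi):
        if arr[j] > pivot:
            arr[i], arr[j] = arr[j], arr[i]
            i += 1
    arr[i], arr[hi] = arr[hi], arr[i]
    if k == i:
        return arr[i]
    if k < i:
        return find_kth(arr, lo, i - 1, k)
    return find_kth(arr, i + 1, hi, k)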
Example #3
def main():
    cases = [
            [3, 4, 0, -3, 4, 2],
            [],
            [0, 0, 0],
            ['a', 'bc', 'ax', 'bb']
            ]

    for arr in cases:
        print(arr)
        test(merge_sort(arr), sorted(arr))
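
merge_sort is checked against sorted(arr), so it presumably returns a new sorted list rather than sorting in place. A standard top-down sketch under that assumption:

def merge_sort(arr):
    if len(arr) <= 1:
        return list(arr)
    mid = len(arr) // 2
    left = merge_sort(arr[:mid])
    right = merge_sort(arr[mid:])
    # Merge the two sorted halves, taking from the left on ties to stay stable.
    merged, i, j = [], 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged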
Example #4
import argparse

import tensorflow as tf

# BASE_DIR, train() and test() are assumed to be defined elsewhere in this module.


def main():
    parser = argparse.ArgumentParser()
    # Model parameters
    parser.add_argument("--max_sequence_length", default=140, help="Bert input max sequence length", type=int)

    # Path parameters
    parser.add_argument("--train_dataset_path", default='{}/dataset/src_data/train_dataset/nCoV_100k_train.labled.csv'.format(BASE_DIR), help="Train folder")
    parser.add_argument("--test_dataset_path", default='{}/dataset/src_data/test_dataset/nCov_10k_test.csv'.format(BASE_DIR), help="Test folder")
    parser.add_argument("--test_submit_example_path", default='{}/data/test_dataset/submit_example.csv'.format(BASE_DIR), help="submit_example folder")
    parser.add_argument("--bert_pretrain_path", default='{}/dataset/bert_base_chinese/'.format(BASE_DIR), help="Bert Pretrain folder")

    # others
    parser.add_argument("--input_categories", default="微博中文内容", help="输入文本的文本内容列")
    parser.add_argument("--output_categories", default="情感倾向", help="标签列")
    parser.add_argument("--epochs", default=2, help="train epochs", type=int)
    parser.add_argument("--batch_size", default=8, help="train batch_size", type=int)

    # Cross-validation parameters
    parser.add_argument("--n_splits", default=5, help="train n_splits", type=int)
    parser.add_argument("--use_cross_valid", default=True, help="Whether to use cross validation")
    parser.add_argument("--cross_dataset_path", default='{}/dataset/cross_data/'.format(BASE_DIR),
                        help="Cross valid folder")

    # Dataset split path parameters
    parser.add_argument("--split_dataset_path", default='{}/dataset/split_data/'.format(BASE_DIR), help="Split dataset folder")

    # mode
    parser.add_argument("--mode", default='test', help="training or test options")
    parser.add_argument("--loss_type", default="focal_loss", help="loss type is focal_loss or cross_entropy")
    parser.add_argument("--learning_rate_1", default=1e-5, help="learning_rate_1")
    parser.add_argument("--learning_rate_2", default=1e-4, help="learning_rate_2 is None or 1e-4...")
    parser.add_argument("--use_different_learning_rate", default=True, help="是否使用不同的学习率")


    # checkpoint
    parser.add_argument("--model_checkpoint_dir", default='{}/ckpt'.format(BASE_DIR), help="Model folder")

    args = parser.parse_args()
    params = vars(args)

    gpus = tf.config.experimental.list_physical_devices(device_type='GPU')

    if gpus:
        tf.config.experimental.set_visible_devices(devices=gpus[0], device_type='GPU')

    if params["mode"] == "train":
        train(params)
    elif params["mode"] == "test":
        test(params)
Example #5
def main():
    print('**** test binary add by string ****')
    test(bi_add('11111','1'), '100000')
    test(bi_add('1010','1010'), '10100')
    
    print('**** test reverse polish notation (RPN) ****')
    test(reverse_polish(' 5   10  + 3 * 15  - '), 30.0)
    # malformed expressions: reverse_polish is expected to handle these itself
    reverse_polish('2 s  + 4 0 /')
    reverse_polish('21 + s 0 /')
    reverse_polish('2 1 + 0 /')

    s = 'the same one'
    tests = ['', 'the same one', ' same', 'he', ' one', 'dif one', 'thesame', ' one ']
    print('**** test is_substring ****')
    for sub in tests:
        test(is_substring(s, sub), s.find(sub) > -1)
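
bi_add, reverse_polish and is_substring are defined elsewhere. Sketches of the first two, consistent with the calls above but otherwise assumptions (the RPN sketch reports malformed input instead of raising, which is what the unchecked calls suggest):

def bi_add(a, b):
    # Add two binary numbers given as strings, digit by digit with a carry.
    i, j = len(a) - 1, len(b) - 1
    carry, digits = 0, []
    while i >= 0 or j >= 0 or carry:
        total = carry
        if i >= 0:
            total += int(a[i])
            i -= 1
        if j >= 0:
            total += int(b[j])
            j -= 1
        digits.append(str(total % 2))
        carry = total // 2
    return ''.join(reversed(digits))


def reverse_polish(expr):
    # Stack-based RPN evaluator; bad tokens, missing operands and division
    # by zero are reported rather than propagated.
    stack = []
    try:
        for token in expr.split():
            if token in ('+', '-', '*', '/'):
                b, a = stack.pop(), stack.pop()
                if token == '+':
                    stack.append(a + b)
                elif token == '-':
                    stack.append(a - b)
                elif token == '*':
                    stack.append(a * b)
                else:
                    stack.append(a / b)
            else:
                stack.append(float(token))
        return stack.pop()
    except (ValueError, ZeroDivisionError, IndexError) as e:
        print('invalid RPN expression: {}'.format(e))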
Example #6
import numpy as np


def main(args, config):
    # create_dataset, create_model, train_graph_generation and test are
    # assumed to be defined in the surrounding project.
    dataset = create_dataset(args, config)
    train_loader, val_loader, test_loader = dataset.create_loaders()
    args.flow_args = [
        args.n_blocks, args.flow_hidden_size, args.n_hidden, args.flow_model,
        args.flow_layer_type
    ]
    model = create_model(args, config).to(args.dev)
    print(vars(args))
    print(model)
    print('number of parameters : {}'.format(
        sum([np.prod(x.shape) for x in model.parameters()])))
    if not args.test:
        trained_model = train_graph_generation(args, config, train_loader,
                                               val_loader, test_loader, model)
    else:
        test_model = test(args, config, model, dataset)
Example #7
def main():
    # test list...
    cases = [
            [],
            [1, 1, 1],
            [-4,-2, 0, 1, 4, 6, 7],
            ]
    for c in cases:
        tmp = c[:]  # copy, so that reversing tmp does not also reverse c
        tmp.reverse()
        test(reverse(c), tmp)

    # string...
    cases = [
            'abccdwsef',
            'h hh',
            'I love rongchao, hope rongchao love me too @_@'
            ]
    for c in cases:
        print('*****test reverse string')
        test(reverse_str(c), c[::-1])
        print('*****test is char unique')
        test(is_chars_unique(c, False), False)
        print('*****test replace space in string')
        test(replace_space(c), c.replace(' ', '%20'))
        print('*****test replace(old, new)')
        test(my_replace(c, 'rongchao', 'Danielle'), 
                c.replace('rongchao', 'Danielle'))

    print('*****test if 2 strings anagrams')
    test(is_anagrams('abeed23', 'e2aeb3d'), True)
    print('*****test s1 is s2 rotation')
    test(is_rotation('waterbottle', 'erbottlewat'), True)
    test(is_rotation('ri', 'i'), False)
    print('*****test remove duplicate chars in a string')
    test(remove_dup('abeed223', True), 'abed23')
    test(remove_dup('abeed223', False), 'abed23')
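
is_rotation, remove_dup and the other string helpers are defined elsewhere. Two sketches for illustration: is_rotation via the classic concatenation trick, and remove_dup keeping the first occurrence of each character (the boolean flag presumably switches between a set-based and a buffer-free strategy; that split is an assumption):

def is_rotation(s1, s2):
    # s2 is a rotation of s1 iff the lengths match and s2 occurs inside s1 + s1.
    return len(s1) == len(s2) and s2 in s1 + s1


def remove_dup(s, use_buffer):
    # Keep the first occurrence of every character.
    if use_buffer:
        seen, out = set(), []
        for ch in s:
            if ch not in seen:
                seen.add(ch)
                out.append(ch)
        return ''.join(out)
    out = ''
    for ch in s:  # O(n^2) variant without an extra set
        if ch not in out:
            out += ch
    return out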