def test_new_contest_problem(self, monkeypatch):
        monkeypatch.setattr('sys.stdin',
                            io.StringIO('\n\n\n\n\n\n\n\n\n\n\n\n\n\n'))
        tools.test(['new_contest', 'contest_name'])
        tools.test([
            'new_problem',
            '--contest',
            'contest_name',
            'Problem One',
            '--author',
            'Ragnar Groot Koerkamp',
            '--validation',
            'default',
        ])
        os.chdir('contest_name')
        monkeypatch.setattr(
            'sys.stdin',
            io.StringIO('Ragnar Groot Koerkamp\ncustom\n\n\n\n\n'))
        tools.test(['new_problem', 'Problem Two'])
        os.chdir('..')
        problemsyaml = Path('contest_name/problems.yaml').read_text()
        assert 'id: problemone' in problemsyaml
        assert 'id: problemtwo' in problemsyaml

        with pytest.raises(SystemExit) as e:
            tools.test(['pdf', '--contest', 'contest_name'])
        assert config.n_warn == 2
        assert Path('contest_name/contest.pdf').is_file()
        # TODO: Fix -- it's broken currently.
        # tools.test(['solutions', '--contest', 'contest_name'])
        tools.test(['tmp', '--clean', '--contest', 'contest_name'])
Example #2
def setup_problem(request):
    problemname = request.param
    problem_dir = RUN_DIR / 'test/problems' / problemname
    os.chdir(problem_dir)
    yield
    tools.test(['tmp', '--clean'])
    os.chdir(RUN_DIR)
Example #3
def setup_identity_problem(request):
    problem_dir = RUN_DIR / 'test/problems/identity'
    os.chdir(problem_dir)
    try:
        tools.test(['tmp', '--clean'])
        yield
    finally:
        tools.test(['tmp', '--clean'])
        os.chdir(RUN_DIR)
Example #4
def main(args=None):
    parser, cfg = parse_args(args)
    use_cuda = torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')
    # >>> Data
    print("\n[Phase 1]: Data Preparation for {}.".format(parser.dataset))
    trainloader, testloader = dataloader(cfg)

    # >>> Model
    print('\n[Phase 2] : Model setup')
    if cfg['CONFIG']['RECONSTRUCT'] and (cfg['CONFIG']['DROPOUT_TYPE']
                                         == 'GUIDED'):
        net, net_D, net_R, checkpoint = model_setup(parser, cfg)
        if use_cuda:
            # map() is lazy in Python 3, so move the models to GPU and wrap them explicitly
            net, net_D, net_R = [
                torch.nn.DataParallel(x.cuda(), device_ids=range(torch.cuda.device_count()))
                for x in (net, net_D, net_R)
            ]
    else:
        net, checkpoint = model_setup(parser, cfg)
        try:
            print(net.dropout)
        except AttributeError:
            print('No Dropout')
        if use_cuda:
            net.cuda()
            net = torch.nn.DataParallel(net,
                                        device_ids=range(
                                            torch.cuda.device_count()))
            #net.to(device)
            cudnn.benchmark = True

    if not parser.testOnly:
        # >>> Train Model
        print('\n[Phase 3] : Training model')
        print('| Training Epochs = ' +
              str(cfg['BASE']['SOLVER']['NUM_EPOCHS']))
        print('| Initial Learning Rate = ' + str(cfg['BASE']['SOLVER']['LR']))
        print('| Optimizer = ' + str(cfg['BASE']['SOLVER']['OPTIMIZER']))
        if cfg['CONFIG']['RECONSTRUCT'] and cfg['CONFIG'][
                'DROPOUT_TYPE'] == 'GUIDED':
            trainval_multi(net, net_D, net_R, trainloader, testloader, cfg,
                           checkpoint, use_cuda)


#        elif cfg['CONFIG']['DROPOUT_TYPE']=='GUIDED' and cfg['CONFIG']['RECONSTRUCT']==False:
#            trainval_guided(net, trainloader, testloader, cfg, checkpoint, use_cuda)
        else:
            trainval(net, trainloader, testloader, cfg, parser, checkpoint,
                     use_cuda)

    # >>> Test model
    test(net, testloader, use_cuda, cfg)
Example #5
def Menu():
    print("________________________")
    print("|SkSweetTooth Framework|")
    print("|  Version 1.0  Beta   |")
    print("|Written in Python by: |")
    print("|     Skittles_        |")
    print("|______________________|")
    print("")
    print("")
    print("")
    print("   __________________,.................,")
    print("   /_/_/_/_/_/_/_/_/,-' ,   ,.  -,-,--/|")
    print("  /_/_/_/_/_/_/_/,-' / /   /-|  / /--/ /")
    print(" /_/_/_/_/_/_/,-' `-' /__ /  \ / /__/ / ")
    print("/_/_/_/_/_/_,:...................../ /")
    print("|________,'________________________|/ ")
    print("")
    print("")
    print("")
    print("")
    print("")
    print("")
    print("(1) Passcrack (2) Port scanner (3)Start Server (4)About (exit)GTFO")

    #Defining variable for selection choice

    selection = input("Choose an option:")

    # Selection code. Unrecognized input clears the screen and redraws the menu.

    if selection == "1":
        tools.Passcrack()
    elif selection == "2":
        tools.portscan()
    elif selection == "3":
        sstfServer.Server()
    elif selection == "4":
        tools.WhatIsSSTF()
    elif selection == "exit":
        sys.exit()
    elif selection == "hiddentest":
        tools.test()
    else:
        os.system('cls' if os.name == 'nt' else 'clear')
        Menu()
Example #6
def main():
    has_cuda = torch.cuda.is_available()

    dev = torch.device('cuda' if has_cuda else 'cpu')
    default_tensor = torch.cuda.FloatTensor if has_cuda else torch.FloatTensor

    torch.set_default_dtype(torch.float32)
    torch.set_default_tensor_type(default_tensor)

    # flat = single color channel
    emnist_train, emnist_test = loaders.emnist('digits', 5, dev)
    emnist_flat_train, emnist_flat_test = loaders.emnist_flat('digits', 5, dev)

    #fake_train, fake_test = loaders.fake(5, dev)
    #fake_flat_train, fake_flat_test = loaders.fakeflat(5, dev)

    conv_net = conv.Net()
    svd_net = svd.Net()

    print(f'ConvNet # of params: {tools.nparams(conv_net)}')
    print(f'SVDNet # of params: {tools.nparams(svd_net)}')
    print()

    conv_opt = optim.Adam(conv_net.parameters())
    svd_opt = optim.Adam(svd_net.parameters())

    nepoch = 3
    for epoch in range(nepoch):
        print(f'--- epoch {epoch}')

        cprint('SVDNet', 'red')
        tools.train(svd_net, dev, emnist_flat_train, svd_opt)
        tools.test(svd_net, dev, emnist_flat_test)
        print()

        cprint('ConvNet', 'blue')
        tools.train(conv_net, dev, emnist_train, conv_opt)
        tools.test(conv_net, dev, emnist_test)
        print()
Example #7
def runner(args):
    configs = load_config(args.config)
    project_configs = configs['PROJECT']
    model_configs = configs['MODEL']
    train_configs = configs['TRAIN']
    test_configs = configs['TEST']
    train_dataset_configs = configs['TRAIN_DATASET']
    test_dataset_configs = configs['TEST_DATASET']
    input_size = train_dataset_configs[
        'input_size'] if args.train else test_dataset_configs['input_size']

    if train_dataset_configs['channels'] == 3:
        base_transforms = transforms.Compose([
            transforms.Resize((input_size, input_size)),
            transforms.ToTensor()
        ])  # ,
        # transforms.Normalize(mean=train_dataset_configs['mean'], std=train_dataset_configs['std'])])
    elif train_dataset_configs['channels'] == 1:
        base_transforms = transforms.Compose([
            transforms.Resize((input_size, input_size)),
            transforms.ToTensor()
        ])  # ,
        # transforms.Normalize(mean=[sum(train_dataset_configs['mean']) / len(train_dataset_configs['mean'])],
        #                      std=[sum(train_dataset_configs['std']) / len(train_dataset_configs['std'])])])

    train_datasets = Fusion_Datasets(train_dataset_configs, base_transforms)
    test_datasets = Fusion_Datasets(test_dataset_configs, base_transforms)

    model = eval(model_configs['model_name'])(model_configs)
    print('Model Para:', count_parameters(model))

    if train_configs['resume'] != 'None':
        checkpoint = torch.load(train_configs['resume'])
        model.load_state_dict(checkpoint['model'].state_dict())

    if args.train:
        train(model, train_datasets, test_datasets, configs)
    if args.test:
        test(model, test_datasets, configs, load_weight_path=True)
Example #8
def eval(model_path):
    env = Visualizer(opt.env)
    split_info = parse_split_info(opt.split_train_val)
    valid_dataset = TripletLossDataset(train_root=opt.data_root,
                                       nb_select_class=2,
                                       nb_select_items=6,
                                       nb_time=1,
                                       is_train=False,
                                       specified_id_list=split_info['query'],
                                       resize_size=opt.resize_size)
    valid_dataloader = DataLoader(dataset=valid_dataset, batch_size=1)
    cmc_valid_dataset = EncoderDataset(data_root=opt.data_root,
                                       specific_id_list=split_info['query'],
                                       resize_size=opt.resize_size)
    cmc_dataloader = DataLoader(dataset=cmc_valid_dataset, batch_size=12)
    reid_metric = CMC(dataloader=cmc_dataloader)

    reid_model = getattr(models, opt.model_name)(resnet_50=True)

    min_valid_loss = 1e6
    reid_model = nn.DataParallel(reid_model)
    if opt.use_gpu:
        reid_model.cuda()
    reid_model.load_state_dict(torch.load(model_path))
    # Schedule learning rate
    reid_model.eval()
    top1, top5, top10 = reid_metric.cmc(reid_model)
    env.plot("load_top1", top1)
    env.plot("load_top5", top5)
    env.plot("load_top10", top10)

    reid_model.eval()
    rk_dict = test(reid_model, opt.resize_size)
    top1 = rk_dict[1]
    top5 = rk_dict[5]
    env.plot("load_fw_top1", top1)
    env.plot("load_fw_top5", top5)
Example #9
 def test_sort(self):
     tools.test(['sort'])
     tools.test(['sort', '--problem', '.'])
     tools.test(['sort', '--problem', str(Path().cwd())])
     tools.test(['sort', '--contest', '..'])
     tools.test(['sort', '--contest', str(Path.cwd().parent)])
Example #10
 def test_problem(self):
     tools.test(['run'])
Example #11
def fw_train():
    env = Visualizer(opt.env)
    split_info = parse_split_info(opt.split_train_val)
    valid_dataset = TripletLossDataset(train_root=opt.data_root,
                                       nb_select_class=2,
                                       nb_select_items=6,
                                       nb_time=1,
                                       is_train=False,
                                       specified_id_list=split_info['query'])
    valid_dataloader = DataLoader(dataset=valid_dataset, batch_size=1)
    cmc_valid_dataset = EncoderDataset(data_root=opt.data_root,
                                       resize_size=opt.resize_size,
                                       specific_id_list=split_info['query'])
    cmc_dataloader = DataLoader(dataset=cmc_valid_dataset, batch_size=12)
    reid_metric = CMC(dataloader=cmc_dataloader)

    batch_hard_loss = BatchHardTripletLoss()

    def exp_lr_scheduler(optimizer, iters, iters_all=20000):
        lr = 0.01 * (0.2**(iters / iters_all))
        if iters % 20000 == 0:
            print('LR is set to {}'.format(lr))
        for param in optimizer.param_groups:
            param['lr'] = lr
        return optimizer

    model = ReidNetHardTrip()
    min_valid_loss = 1e6
    model = nn.DataParallel(model)
    if opt.use_gpu:
        model.cuda()
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    dataset = NewDataset(
        train_root=
        "/home/xksj/Data/lp/re-identification/cuhk03-src/cuhk03_release/detected",
        batch_size=110,
        max_per_id=10,
        is_train=True,
        resize_size=opt.resize_size,
        iter_sz=20000,
        nb_time=1,
        specified_id_list=split_info['trainval'])
    train_loader = DataLoader(dataset=dataset, batch_size=1)

    for iteration, (labels, inputs) in enumerate(train_loader):
        optimizer = exp_lr_scheduler(optimizer, iteration)

        dat_list = [labels[0], inputs[0]]
        dat_list = [Variable(x) for x in dat_list]

        if opt.use_gpu:
            dat_list = [x.cuda() for x in dat_list]
        labels, inputs = dat_list
        model.train()
        features = model(inputs)
        batchloss = batch_hard_loss(features, labels)
        loss_log = batchloss.cpu().data[0]
        env.plot("loss", loss_log)
        optimizer.zero_grad()
        batchloss.backward()
        optimizer.step()

        if iteration % 20 == 0:
            model.eval()
            rk_dict = test(model, opt.resize_size)
            top1 = rk_dict[1]
            top5 = rk_dict[5]
            env.plot("fw_top1", top1)
            env.plot("fw_top5", top5)
        """
Example #12
 def test_gitlabci(self):
     tools.test(['gitlabci'])
Example #13
def train(**kwargs):
    opt.parse(kwargs)

    env = Visualizer(opt.env)
    split_info = parse_split_info(opt.split_train_val)
    train_dataset = TripletLossDataset(
        train_root=opt.data_root,
        nb_select_class=18,
        nb_select_items=10,
        is_train=True,
        nb_time=1,
        specified_id_list=split_info['trainval'],
        resize_size=opt.resize_size)
    train_dataloader = DataLoader(dataset=train_dataset, batch_size=1)
    valid_dataset = TripletLossDataset(train_root=opt.data_root,
                                       nb_select_class=2,
                                       nb_select_items=6,
                                       nb_time=1,
                                       is_train=False,
                                       specified_id_list=split_info['query'],
                                       resize_size=opt.resize_size)
    valid_dataloader = DataLoader(dataset=valid_dataset, batch_size=1)
    cmc_valid_dataset = EncoderDataset(data_root=opt.data_root,
                                       specific_id_list=split_info['query'],
                                       resize_size=opt.resize_size)
    cmc_dataloader = DataLoader(dataset=cmc_valid_dataset, batch_size=12)
    reid_metric = CMC(dataloader=cmc_dataloader)

    reid_model = getattr(models, opt.model_name)(resnet_50=True)
    min_valid_loss = 1e6
    reid_model = nn.DataParallel(reid_model)
    if opt.use_gpu:
        reid_model.cuda()
    # optimizer = optim.SGD(reid_model.parameters(), lr=opt.lr, momentum=0.9)
    optimizer = optim.Adam(reid_model.parameters(), lr=opt.lr)
    criterion = BatchHardTripletLoss()

    epoch_size = opt.epoch_size
    scheduler = StepLR(optimizer=optimizer,
                       step_size=10000 / len(train_dataset),
                       gamma=0.2)
    top1_best = -1
    fw_top1_best = -1

    # Schedule learning rate

    def adjust_lr(epoch):
        t0 = 40
        lr = opt.lr if epoch <= t0 else \
            opt.lr * (0.001 ** ((epoch - t0) / (opt.epoch_size - t0)))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    for epoch in range(epoch_size):

        reid_model.train()
        scheduler.step(epoch)
        for ii, (identity_list,
                 image_data_list) in enumerate(train_dataloader):
            optimizer.zero_grad()
            image_data_list = image_data_list[0]
            identity_list = identity_list.t()
            dat_list = [identity_list, image_data_list]
            dat_list = [Variable(x) for x in dat_list]

            if opt.use_gpu:
                dat_list = [x.cuda() for x in dat_list]
            identity_list, image_data_list = dat_list
            feature_list = reid_model.forward(image_data_list)
            loss = criterion(feature_list, identity_list)
            loss.backward()
            optimizer.step()
            loss_log = loss.cpu().data[0]
            env.plot("loss", loss_log)
            print("epoch {} batch {} has loss {}".format(epoch, ii, loss_log))

        reid_model.eval()
        rk_dict = test(reid_model, opt.resize_size)
        top1 = rk_dict[1]
        top5 = rk_dict[5]
        env.plot("fw_top1", top1)
        env.plot("fw_top5", top5)
        """
        reid_model.eval()
        top1, top5, top10 = reid_metric.cmc(reid_model)
        env.plot("top1", top1)
        env.plot("top5", top5)
        env.plot("top10", top10)
        """
        # test loss
        val_loss = val(reid_model, valid_dataloader, criterion, env)
        # scheduler.step(val_loss)

        if top1 > top1_best:
            top1_best = top1
            prefix = 'checkpoints/' + "ReidTriLoss_resnet50_top1" + '_' + str(
                top1) + "_"
            name = time.strftime(prefix + '%m%d_%H:%M:%S.pth')
            torch.save(reid_model.state_dict(), name)
Example #14
 def test_constraints(self):
     tools.test(['constraints', '-e'])
Example #15
 def test_stats(self):
     tools.test(['stats'])
Example #16
 def test_test(self):
     tools.test(['test', 'submissions/accepted/author.c'])
     tools.test(['test', 'submissions/accepted/author.c', '--samples'])
     tools.test(['test', 'submissions/accepted/author.c', 'data/sample'])
     tools.test(
         ['test', 'submissions/accepted/author.c', 'data/sample/1.in'])
     tools.test(
         ['test', 'submissions/accepted/author.c', 'data/sample/1.ans'])
     tools.test([
         'test', 'submissions/accepted/author.c', 'data/sample/1',
         'data/sample/2'
     ])
Example #17
 def test_generate(self):
     tools.test(['generate'])
Example #18
 def test_zip(self):
     tools.test(['zip', '--force'])
     Path('../A.zip').unlink()
Example #19
'''
Train the neural network on the training-set CSV,
test it on the test-set CSV,
and report the resulting score.
'''
from myNeuralNetwork import n
import tools
import shelve

if __name__ == '__main__':
    # Training
    # arg 1: the neural network
    # arg 2: the training dataset
    # arg 3: number of training passes
    tools.train(n, './mnist_dataset/mnist_train.csv', 1)

    # Testing
    # arg 1: the neural network
    # arg 2: the test dataset
    ret = tools.test(n, './mnist_dataset/mnist_test.csv')

    print("==" * 20)
    print("score = ", ret)

Example #20
 def test_run(self):
     tools.test(['run'])
     # pass testcases
     tools.test(['run', 'data/sample'])
     tools.test(['run', 'data/secret/seeding', 'data/sample/1.in'])
     # pass submission
     tools.test(['run', 'submissions/accepted/author.cpp'])
     # pass submissions + testcases
     tools.test(
         ['run', 'data/sample/1.in', 'submissions/accepted/author.cpp'])
     tools.test([
         'run', 'submissions/accepted/author.c',
         'submissions/accepted/author.cpp', '--samples'
     ])
Example #21
 def test_tmp(self):
     tools.test(['tmp'])
Example #22
 def test_pdf(self):
     tools.test(['pdf'])
Example #23
 def test_bad_submission(self, bad_submission):
     with pytest.raises(SystemExit) as e:
         tools.test(['run', str(bad_submission)])
Example #24
 def test_validate(self):
     tools.test(['validate'])
Example #25
def setup_contest(request):
    contest_dir = RUN_DIR / 'test/problems'
    os.chdir(contest_dir)
    yield
    tools.test(['tmp', '--clean'])
    os.chdir(RUN_DIR)
Example #26
 def test_samplezip(self):
     tools.test(['samplezip'])
     Path('../samples.zip').unlink()
Example #27
 def test_sort(self):
     tools.test(['sort'])
     tools.test(['sort', '--contest', '.'])
     tools.test(['sort', '--contest', str(Path.cwd())])
     tools.test(['sort', '--problem', 'identity'])
     tools.test(['sort', '--problem', str(Path.cwd() / 'identity')])
Example #28
# Given an array of integers where 1 ≤ a[i] ≤ n (n = size of array), some elements appear twice and others appear once.
# Find all the elements of [1, n] inclusive that do not appear in this array.
# Could you do it without extra space and in O(n) runtime? You may assume the returned list does not count as extra space.

# Example 1:
# Input: [4,3,2,7,8,2,3,1]
# Output: [5,6]

# Example 2:
# Input: [1,1]
# Output: [2]

# Example 3:
# Input: []
# Output: []

# Example 4:
# Input: [2,2]
# Output: [1]

def findDisappearedNumbers(nums):
    expected = set(range(1, len(nums) + 1))
    return list(expected.difference(set(nums)))

from tools import test
inputs = [[4,3,2,7,8,2,3,1],[1,1],[],[2,2],[1,1,2,2]]
outputs = [[5,6],[2],[],[1],[3,4]]
funcs = [findDisappearedNumbers]
test(inputs, outputs, funcs)
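
# The set-difference solution above allocates two extra sets, so it does not meet
# the "without extra space" constraint stated in the prompt. Below is a minimal
# sketch of the in-place negation variant that does; the helper name
# findDisappearedNumbersInPlace is hypothetical, and it assumes the input list
# may be mutated.
def findDisappearedNumbersInPlace(nums):
    for v in nums:
        i = abs(v) - 1           # value v marks slot v - 1
        nums[i] = -abs(nums[i])  # negate to record "seen"
    # indices whose entries stayed positive were never seen
    return [i + 1 for i, v in enumerate(nums) if v > 0]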
Example #29
 def test_solutions(self):
     tools.test(['solutions'])