Example #1
import torch
import torch.nn as nn

# CIFAR100, continual_wrapper, seresnet20_cifar100, learn, test and
# visualise are project-local helpers.
def main(config):
    acc_dict = {}
    trainset, valset = CIFAR100('data/')
    # Split CIFAR-100 into 10 sequential tasks, then keep only tasks 0-2.
    train_dict, val_dict = continual_wrapper(trainset, valset, num_tasks=10)
    for i in range(3, 10):
        del train_dict[i]
        del val_dict[i]

    net = seresnet20_cifar100().to(config.device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(net.parameters(),
                                lr=config.lr,
                                momentum=config.mo)
    for task in range(config.num_tasks):
        trainloader = torch.utils.data.DataLoader(train_dict[task],
                                                  batch_size=config.bs)
        valloader = torch.utils.data.DataLoader(val_dict[task],
                                                batch_size=config.bs)
        learn(net, trainloader, valloader, criterion, optimizer, config)
        # After each task, re-evaluate on it and on every earlier task
        # to measure catastrophic forgetting.
        acc_dict[task] = {}
        for i in range(task + 1):
            valloader = torch.utils.data.DataLoader(val_dict[i],
                                                    batch_size=config.bs)
            acc_dict[task][i] = test(net, valloader, criterion, config)[1]
    visualise(acc_dict)
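
The config object above is read only through its attributes. A minimal invocation sketch, assuming nothing beyond the fields the snippet uses (values are illustrative; learn() and test() may read further fields not visible here):

from types import SimpleNamespace
import torch

config = SimpleNamespace(
    device='cuda' if torch.cuda.is_available() else 'cpu',
    lr=0.1,        # SGD learning rate
    mo=0.9,        # SGD momentum
    bs=128,        # DataLoader batch size
    num_tasks=3,   # the three tasks kept after the del loop
)
main(config)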
Example #2
import copy
import csv
import sys

import numpy as np

# board_init, experiment and utils are project-local helpers.

if __name__ == '__main__':
    board = board_init(6, 6)

    if sys.argv[1] != "0":
        # A pre-recorded track file was supplied on the command line.
        file_name = sys.argv[1]
    else:
        # Otherwise roll out the trained agent and record a fresh track.
        env_parameter = [6, 6, 0, 33]
        agent_parameter = [5, 0.2, 1.0, 0.9,
                           0.0]  # numActions, epsilon, gamma, alpha, init value

        task_env, task_agent = experiment.RL_init(env_parameter,
                                                  agent_parameter)
        task_agent.behavior_value = np.load("B_value.npy")
        task_agent.Q_value = np.load("Q.npy")
        experiment.test(task_env, task_agent, 200)
        file_name = "track.csv"

    with open(file_name, newline='') as f:
        reader = csv.reader(f)
        track = list(reader)

    print("Start...")

    i = 0
    count = 0
    for item in track:
        current_board = copy.deepcopy(board)
        i += 1
        # Convert the flat state index to (x, y); row 0 of the board is
        # the top row, hence the 5 - y flip when marking the agent.
        x, y = utils.to_cord(int(item[0]), 6)
        current_board[5 - y][x] = '@'
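
utils.to_cord is not shown above; a plausible sketch, assuming states are numbered row-major on a board of the given width (an assumption, not the project's definition):

def to_cord(index, width):
    # x is the column, y is the row, for a row-major flat index
    return index % width, index // width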
Example #3

import os
import pickle
import sys

import numpy as np
from prettytable import PrettyTable

# LoadBar is a project-local progress bar; numSplits is assumed to be
# defined earlier in the script (one results file per split).

if not os.path.exists('results/0_results.p'):
    print("No results found! Please run some setupExperiments.py before viewing results!")
    quit()

with open('results/0_results.p', 'rb') as f:
    x = pickle.load(f)
    params = [y[0] for y in x[0]['params']]

tab = PrettyTable(["Run", "NumEvals", "ValObj"] + params + ["TestObj"])
tab.align = 'l'
print("Generating Results Table/Testing Hyperparameters...")
lb = LoadBar(np.min([numSplits, 30]))
lb.setup()
for i in range(numSplits):
    if lb.test(i, numSplits): lb += 1
    with open("results/" + str(i) + "_results.p", 'rb') as f:
        x = pickle.load(f)
    # Pick the evaluation with the lowest validation objective.
    objectives = np.array([a['objective'] for a in x])
    cMin = x[np.argmin(objectives)]
    paramDict = dict(cMin['params'])
    # Each run directory is expected to provide its own experiment.py;
    # put it first on the path and drop any cached copy so this run's
    # test() is the one that actually gets imported.
    sys.path.insert(0, str(i))
    sys.modules.pop('experiment', None)
    from experiment import test
    sys.path.pop(0)
    acc = test('experiments/' + str(i) + "/", paramDict)
    tab.add_row([i, len(objectives), "{:.4f}".format(cMin['objective'])]
                + ["{:.4f}".format(paramDict[p][0]) for p in params]
                + ["{:.4f}".format(acc)])
lb.clear()
print(tab)
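
The script above assumes each results/<i>_results.p pickle holds a list of evaluation records keyed by 'objective' and 'params', with params stored as (name, value-list) pairs. A minimal sketch of a compatible writer (names and values are illustrative):

import pickle

records = [
    {'objective': 0.4213, 'params': [('lr', [1e-3]), ('dropout', [0.5])]},
    {'objective': 0.3977, 'params': [('lr', [1e-4]), ('dropout', [0.3])]},
]
with open('results/0_results.p', 'wb') as f:
    pickle.dump(records, f)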
Example #4
# unittest-style test method; the enclosing TestCase class is not shown.
def testTestFGSMSVHN(self):
    test(num_gpus=2,
         total_batch_size=200,
         image_size=28,
         summary_dir='debug/summary/caps/svhn/Default/',
         load_test_path='debug/data/caps/svhn/test_FGSM.npz')
Example #5
def testTestMNIST(self):
    test(num_gpus=2,
         total_batch_size=200,
         image_size=28,
         summary_dir='debug/summary/caps/mnist/Default/',
         load_test_path='debug/data/caps/mnist/test.npz')
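
Both methods read like unittest.TestCase methods with the enclosing class elided. A minimal harness sketch (the class name is hypothetical) showing how such a method would be collected and run:

import unittest

class CapsModelTest(unittest.TestCase):  # hypothetical class name
    def testTestMNIST(self):
        test(num_gpus=2,
             total_batch_size=200,
             image_size=28,
             summary_dir='debug/summary/caps/mnist/Default/',
             load_test_path='debug/data/caps/mnist/test.npz')

if __name__ == '__main__':
    unittest.main()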
Example #6
            # Inside the per-job loop: the model for this job has just been built.
            print('\n{:-^50}'.format(' Network Initialized '))
            utils.print_network(model)
            print('-' * 50 + '\n')

            if args.stage in ('train', 'both'):
                print(opts)
                experiment.train(opts,
                                 model,
                                 training_dataset,
                                 validation_dataset,
                                 opts.train_epochs,
                                 resume_from_epoch=resume_from_epoch)

            if args.stage in ('test', 'both'):
                print('\n{:-^50}'.format(' Testing Model '))
                experiment.test(opts, model, testing_dataset, save_loss=True)

        opts.jobs += 1

    if not opts.hyperparameter:

        # Selected hyperparameters for a single run (no grid search).
        param_grid = {
            'lr': 1e-4,
            'batch_size': 4,
            'loss_type': 'l1',
            'loss_formulation': 'absolute',
            'image_normalization': False,
            'compensated_target': False,
        }
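
When opts.hyperparameter is true, the fixed values above would presumably be swept instead. A sketch of one way to enumerate such a search with scikit-learn's ParameterGrid (an assumption about intent, not the project's actual search code):

from sklearn.model_selection import ParameterGrid

search_space = {
    'lr': [1e-3, 1e-4],
    'batch_size': [4, 8],
    'loss_type': ['l1', 'l2'],
}
for candidate in ParameterGrid(search_space):
    print(candidate)  # one fully specified hyperparameter dict per job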