Code Example #1
File: using_gz.py Project: spyphy/cnn
res_out = net.SGD(training_data,
                  validation_data,
                  shared_image_data,
                  mini_batch_size,
                  epochs=1,
                  eta=0.2,
                  k_eta=0.99998,
                  teaching=False)
print('out:  ' + str(res_out))
print('goal: ' + str(res_goal))
res = [
    1 if res_out[i] == res_goal[i] else 0 for i in range(0, mini_batch_size)
]
print(res)

res_out = net.SGD(training_data,
                  validation_data,
                  shared_image_data,
                  mini_batch_size,
                  epochs=1,
                  eta=0.2,
                  k_eta=0.99998,
                  teaching=False)
print('out:  ' + str(res_out))
print('goal: ' + str(res_goal))
res = [
    1 if res_out[i] == res_goal[i] else 0 for i in range(0, mini_batch_size)
]
print(res)

# Select a single digit image, display it, and save it as a PNG
x = image_data[0]
num = 8
digit = x[num]
visual.show_image(digit)
visual.save_image_png(digit)
Code Example #2
    options = {'test_size': test_size, 'no_of_pipeline': no_of_pipeline}
    return options


window1 = preestimator()
window2 = None  # start off with 1 window open
while True:  # Event Loop
    window, event, values = sg.read_all_windows()

    if event in (sg.WIN_CLOSED, 'Exit', 'Submit'):
        window.close()
        if window == window1:
            break
        elif window == window2:
            window2 = None

    if event == 'Estimator':
        parsePreestimators()
        window2 = estimator()

    elif event == 'Submit':
        parseEstimators()
        #additional_param()
        additional_parameters = additional_param()
        path = main(additional_parameters)
        pop = sg.Popup('Submit')

        if pop == 'OK':
            window.close()
            show_image(path)
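
Example #2 depends on project helpers that are not shown (preestimator(), estimator(), parsePreestimators(), parseEstimators(), additional_param(), main(), show_image()). The sketch below is a minimal, self-contained version of the same PySimpleGUI multi-window pattern built around sg.read_all_windows(); the window layouts and button keys are placeholders, not the project's actual widgets.

import PySimpleGUI as sg


def make_main_window():
    # Placeholder layout standing in for preestimator()
    layout = [[sg.Text('Main window')],
              [sg.Button('Estimator'), sg.Button('Exit')]]
    return sg.Window('Main', layout, finalize=True)


def make_estimator_window():
    # Placeholder layout standing in for estimator()
    layout = [[sg.Text('Estimator window')],
              [sg.Button('Submit'), sg.Button('Exit')]]
    return sg.Window('Estimator', layout, finalize=True)


window1 = make_main_window()
window2 = None  # start with only the main window open

while True:  # Event loop shared by all open windows
    window, event, values = sg.read_all_windows()

    if event in (sg.WIN_CLOSED, 'Exit'):
        window.close()
        if window == window1:    # closing the main window ends the program
            break
        elif window == window2:  # closing the second window just forgets it
            window2 = None

    elif event == 'Estimator' and window2 is None:
        window2 = make_estimator_window()

    elif event == 'Submit':
        # The real code would collect `values` here before closing the window
        window.close()
        if window == window2:
            window2 = None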
Code Example #3
def main(args, device):
    # Initialize datasets and loaders
    datasetGenerator = dataset.DatasetGenerator(
        args.dataset, args.train_batch, args.test_batch, args.data_path,
        args.trans_arg, args.transform, args.delta_path + args.noise + '.pt')

    train_loader, validation_loader, test_loader = datasetGenerator.get_data_loaders()

    # Initialize model
    model = args.dataset if args.model is None else args.model
    net = get_model(model).to(device)
    if args.load_model is not None:
        net.load_state_dict(
            torch.load(args.model_path + args.load_model + '.pt'))

    # Initialize attacks and trainer
    trainer = Trainer(device, net)
    pgd_max = attacks.PGDMax(device, args.epsilon, args.epsilon / 10,
                             args.iterations, args.restarts)
    fgsm = attacks.FGSM(device, args.epsilon)

    if args.todo == "train_nat":
        # Train the network on natural (unperturbed) data
        trainer.train(train_loader, validation_loader, args.epoch,
                      args.model_path + args.save)
        test_trained_model(net, args.model_path + args.save + '.pt', trainer,
                           pgd_max, fgsm, test_loader)
        return

    elif args.todo == "train_pgd":
        # Train the network with PGD adversarial training
        trainer.train(train_loader,
                      validation_loader,
                      args.epoch,
                      args.model_path + args.save,
                      attack=pgd_max)
        test_trained_model(net, args.model_path + args.save + '.pt', trainer,
                           pgd_max, fgsm, test_loader)
        return

    elif args.todo == "train_fgsm":
        # Train the network with FGSM adversarial training
        trainer.train(train_loader,
                      validation_loader,
                      args.epoch,
                      args.model_path + args.save,
                      attack=fgsm)
        test_trained_model(net, args.model_path + args.save + '.pt', trainer,
                           pgd_max, fgsm, test_loader)
        return

    elif args.todo == "test":
        # Test performance of the trained model
        test_trained_model(net, args.model_path + args.load_model + '.pt',
                           trainer, pgd_max, fgsm, test_loader)
        return

    elif args.todo == "show_noise":
        index = 32  # Index of the image shown when visualizing the data augmentation techniques
        images, _ = next(iter(train_loader))
        noise = torch.load(args.delta_path + args.noise + '.pt')

        # Scale the noise up so it is clearly visible when plotted
        for i in range(len(noise)):
            noise[i] = noise[i].mul(100)

        visual.show_images(noise, 10, 10, save=args.result_path + 'noise')
        visual.show_images(images, 3, 3, save=args.result_path + 'images')

        visual.show_image(images[index],
                          save=args.result_path + 'img_' + str(index))
        visual.show_image(noise[index],
                          save=args.result_path + 'noise_' + str(index))
        return

    perturbation = NoiseGenerator(device, net, args.epsilon, args.iterations,
                                  args.max_iter, args.train_step,
                                  args.stop_error)

    if args.todo == "sample_wise":
        # Generate sample-wise error-minimizing noise
        perturbation.sample_wise(train_loader,
                                 save=args.delta_path + args.save)
        return

    elif args.todo == "class_wise":
        # Generate class-wise error-minimizing noise
        perturbation.class_wise(train_loader,
                                num_classes=10,
                                save=args.delta_path + args.save)
        return

    else:
        raise NotImplementedError
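
main(args, device) in Example #3 reads a large number of attributes from args. The sketch below shows the kind of argparse setup that could drive it, using the attribute names that appear above; the defaults, choices layout, and device selection are illustrative assumptions, not taken from the project.

import argparse

import torch


def parse_args():
    # Defaults below are illustrative, not the project's actual values
    parser = argparse.ArgumentParser()
    # Task selection and data
    parser.add_argument('--todo', default='train_nat',
                        choices=['train_nat', 'train_pgd', 'train_fgsm', 'test',
                                 'show_noise', 'sample_wise', 'class_wise'])
    parser.add_argument('--dataset', default='cifar10')
    parser.add_argument('--model', default=None)
    parser.add_argument('--data_path', default='./data/')
    parser.add_argument('--train_batch', type=int, default=128)
    parser.add_argument('--test_batch', type=int, default=128)
    parser.add_argument('--transform', default=None)
    parser.add_argument('--trans_arg', default=None)
    # Checkpoints, noise files, and results
    parser.add_argument('--model_path', default='./models/')
    parser.add_argument('--delta_path', default='./deltas/')
    parser.add_argument('--result_path', default='./results/')
    parser.add_argument('--save', default='model')
    parser.add_argument('--load_model', default=None)
    parser.add_argument('--noise', default='noise')
    # Training and attack hyperparameters
    parser.add_argument('--epoch', type=int, default=100)
    parser.add_argument('--epsilon', type=float, default=8 / 255)
    parser.add_argument('--iterations', type=int, default=10)
    parser.add_argument('--restarts', type=int, default=1)
    parser.add_argument('--max_iter', type=int, default=10)
    parser.add_argument('--train_step', type=int, default=20)
    parser.add_argument('--stop_error', type=float, default=0.01)
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    main(args, device)  # main() as defined in Example #3 above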
Code Example #4
File: test.py Project: saaduts/automl
from result_dir import make_dir_if_not_exists
from visual import show_image

show_image('RESULT/2021_05_01_22_27_31/PLOT')
Code Example #5
import argparse
import json

import game    # project-local module providing create_ships, create_attacks, eval_game
import visual  # project-local module providing draw_game and show_image

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('code', type=str)
    parser.add_argument('-i', '--image', action='store_true')
    parser.add_argument('-p', '--preview', action='store_true')
    parser.add_argument('-j', '--use-json', action='store_true')
    args = parser.parse_args()

    if args.use_json:
        with open(args.code) as f:
            code = json.load(f)
    else:
        code_parts = args.code.split('-')
        code = {'N': int(code_parts[0]),  # grid size NxN
                'S': code_parts[1],  # Ships
                'T': code_parts[2]}  # Attacks

    ships = game.create_ships(code['S'])
    attacks = game.create_attacks(code['T'])
    result = game.eval_game(ships, attacks)
    print(result)

    if args.image or args.preview:
        image = visual.draw_game((code['N'], code['N']), ships, attacks)

    if args.image:
        image.save('grid_battle_ship.jpg', quality=95)

    if args.preview:
        visual.show_image(image)
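
Every example on this page calls visual.show_image, and Example #1 also calls visual.save_image_png, but each project ships its own visual module (Example #4 even passes a file path rather than an image object). Purely as an illustration of what such helpers might look like for array-like inputs, here is a minimal sketch assuming a matplotlib backend; it is not any of these projects' actual implementation.

import matplotlib.pyplot as plt
import numpy as np


def show_image(img, save=None):
    # Illustrative stand-in: display one image, optionally saving it as <save>.png
    arr = np.asarray(img)
    if arr.ndim == 3 and arr.shape[0] in (1, 3):  # CHW (torch-style) -> HWC
        arr = arr.transpose(1, 2, 0)
    arr = arr.squeeze()
    plt.imshow(arr, cmap='gray' if arr.ndim == 2 else None)
    plt.axis('off')
    if save is not None:
        plt.savefig(save + '.png', bbox_inches='tight')
    plt.show()


def save_image_png(img, path='image.png'):
    # Illustrative stand-in: write an HxW or HxWxC image to disk as a PNG
    plt.imsave(path, np.asarray(img).squeeze(), cmap='gray')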