Code example #1
File: benchmark.py  Project: aghyad/bench
def worker_main(creation_queue, pipe, worker_id):
    '''The code for a worker. First the worker helps fill the database with
    players. Then the worker will simulate its players.'''
    # We need a try block to cleanly exit when ctrl-c is pressed.
    try:
        # Setup our reporting
        db = Database()
        reporter = Reporter(pipe)

        # We are responsible for the following range of players.
        players = range(worker_id * PLAYERS_PER_WORKER, (worker_id + 1) * PLAYERS_PER_WORKER)

        # Add all our players to the db.
        for player in players:
            db.add_player(player)

        # Mark our worker as done initializing.
        creation_queue.get()
        creation_queue.task_done()
        # Wait for all workers to be done.
        creation_queue.join()

        # Now simulate actions.
        while True:
            simulate_play(db, random.choice(players))
            reporter.mark_event()
    except KeyboardInterrupt:
        pass
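
The worker above assumes a parent process that pre-loads a JoinableQueue with one token per worker and hands each worker one end of a Pipe for its Reporter. As a rough, hypothetical sketch of that launcher (NUM_WORKERS, launch_workers and the Pipe wiring are assumptions, not part of the original benchmark.py):

import multiprocessing

NUM_WORKERS = 4  # assumed worker count

def launch_workers():
    # One token per worker; each worker's get()/task_done() marks its players as
    # created, and join() releases every worker once all tokens are done.
    creation_queue = multiprocessing.JoinableQueue()
    for _ in range(NUM_WORKERS):
        creation_queue.put(None)

    workers, parent_pipes = [], []
    for worker_id in range(NUM_WORKERS):
        parent_end, child_end = multiprocessing.Pipe()
        proc = multiprocessing.Process(target=worker_main,
                                       args=(creation_queue, child_end, worker_id))
        proc.start()
        workers.append(proc)
        parent_pipes.append(parent_end)
    return workers, parent_pipes
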
Code example #2
def testGetConnection(self):
    report = Reporter(config=reportConfig)
    res = report.reportGet('[{"data":"get"}]')
    logging.debug("ok" + str(res))
Code example #3
    elif triplet_method == "batch_hardv2":
        loss_fn = losses.BatchHardTripletLoss_v2(margin=margin,
                                                 squared=False,
                                                 soft_margin=soft_margin)

    elif triplet_method == "batch_all":
        loss_fn = losses.BatchAllTripletLoss(margin=margin,
                                             squared=False,
                                             soft_margin=soft_margin)
    # rt = '/nfs/nas4/marzieh/marzieh/puf/ckpt/batch_hardv2/'
    # model_filename = rt + 'Run004,modelTriplet,Epoch_345,acc_0.999688.tar'
    # model_filename = Reporter(ckpt_root=os.path.join(ROOT_DIR, 'ckpt'),
    #                           exp=triplet_method, monitor='acc').select_best(run=run_name).selected_ckpt

    model_filename = Reporter(
        ckpt_root=os.path.join(ROOT_DIR, 'ckpt'),
        exp=triplet_method,
        monitor='acc001').select_best(run='X' + run_name).selected_ckpt
    print(model_filename)
    model = models.modelTriplet(embedding_dimension=emb_dim,
                                model_architecture=model_name,
                                pooling=pooling)
    model.to(device)
    model.load_state_dict(torch.load(model_filename)['model_state_dict'])

    print('Evaluating model on test data')
    batch_all = losses.BatchAllTripletLoss(margin=margin,
                                           squared=False,
                                           soft_margin=soft_margin)
    t = 0
    nonzeros = 0
    triplet_loss_sum = 0
Code example #4
    elif triplet_method == "batch_all":
        loss_fn = losses.BatchAllTripletLoss(margin=margin,
                                             squared=False,
                                             soft_margin=soft_margin)

    model = models.modelTriplet(embedding_dimension=emb_dim,
                                model_architecture=model_name,
                                pretrained=False,
                                pooling=pooling)
    model.to(device)
    #  --------------------------------------------------------------------------------------
    #  Resume training if start is False
    #  --------------------------------------------------------------------------------------
    if not start:
        reporter = Reporter(ckpt_root=os.path.join(ROOT_DIR, 'ckpt'),
                            exp=triplet_method,
                            monitor='acc')
        last_model_filename = reporter.select_last(run=run_name).selected_ckpt
        last_epoch = int(reporter.select_last(run=run_name).last_epoch)
        loss0 = reporter.select_last(run=run_name).last_loss
        loss0 = float(loss0[:-4])
        model.load_state_dict(
            torch.load(last_model_filename)['model_state_dict'])
    else:
        last_epoch = -1
        loss0 = 0

    optimizer_model = torch.optim.Adam(model.parameters(), lr=lr)
    path_ckpt = '{}/ckpt/{}'.format(ROOT_DIR, triplet_method)
    # learning embedding checkpointer.
    ckpter = CheckPoint(model=model,
Code example #5
#!/usr/bin/env python3

from utils import Reporter

if __name__ == '__main__':
    reporter = Reporter()
    reporter.report_test_success()
Code example #6
#!/usr/bin/env python3

from utils import Reporter


if __name__ == '__main__':
    reporter = Reporter()
    reporter.report_test_failure()
Code example #7
#!/usr/bin/env python3

from utils import Reporter

if __name__ == '__main__':
    reporter = Reporter()
    reporter.report_release_success()
Code example #8
#!/usr/bin/env python3

from utils import Reporter

if __name__ == '__main__':
    reporter = Reporter()
    reporter.report_release_failure()
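
The four hook scripts above differ only in which Reporter method they call. The project's utils.Reporter is not shown here; purely to illustrate the interface those scripts assume, a minimal hypothetical stand-in could look like this (not the real implementation):

class Reporter:
    """Hypothetical stand-in for utils.Reporter; the real class may report elsewhere."""

    def _report(self, stage, outcome):
        print('{}: {}'.format(stage, outcome))

    def report_test_success(self):
        self._report('test', 'success')

    def report_test_failure(self):
        self._report('test', 'failure')

    def report_release_success(self):
        self._report('release', 'success')

    def report_release_failure(self):
        self._report('release', 'failure')
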
Code example #9
File: train.py  Project: ppak1991cy/GAN-pytorch
def train():

    # Essential train elements
    image_dataset = GANImageDataset(dataset_path)
    image_dataiter = DataIterator(dataset=image_dataset,
                                  batch_size=8,
                                  shuffle=True)
    net_d = Discriminator().to(device)
    net_g = Generator().to(device)

    lr = 1e-4
    # Each optimizer updates the parameters of its own network.
    optimizer_g = torch.optim.Adam(net_g.parameters(), lr=lr, betas=(0, 0.9))
    optimizer_d = torch.optim.Adam(net_d.parameters(), lr=lr, betas=(0, 0.9))
    one = torch.FloatTensor([1]).to(device)
    mone = one * -1

    # Record items
    keys_d = ["fake", "real", "gp", "d_loss"]
    reporter_d = Reporter(keys_d,
                          t="discriminator",
                          interval=1,
                          file_path=None)
    keys_g = ["g_loss"]
    reporter_g = Reporter(keys_g, t="generator", interval=1, file_path=None)
    writer = SummaryWriter()

    fixed_noise = gen_rand_noise()
    with torch.no_grad():
        fixed_noisev = fixed_noise

    # Training process
    for iteration in range(ITERS):

        # 1. Update net_d (the discriminator)
        for p in net_d.parameters():  # reset requires_grad
            p.requires_grad_(True)  # they are set to False below in the net_g update
        for i in range(CRITIC_ITERS):
            with timer("Critic iter: " + str(iteration) + "/" + str(i)):
                net_d.zero_grad()
                noise = gen_rand_noise()
                with torch.no_grad():
                    noisev = noise
                fake_data = net_g(noisev).detach()
                real_data = next(image_dataiter)

                # Define the discriminator loss
                d_fake = net_d(fake_data).mean()  # expectation of D on fake data
                d_real = net_d(real_data).mean()  # expectation of D on real data
                gradient_penalty = calc_gradient_penalty(net_d, real_data,
                                                         fake_data)
                d_loss = d_fake - d_real + gradient_penalty
                # Update parameters
                d_loss.backward(one)
                optimizer_d.step()

            # Record "fake", "real", "gp", "d_loss"
            record_d = [
                d_fake.data, d_real.data, gradient_penalty.data, d_loss.data
            ]
            record_d = [float(r) for r in record_d]
            reporter_d.recive(record_d)
            if i == CRITIC_ITERS - 1:
                reporter_d.report(iteration)
                writer.add_scalar("data/d_loss", d_loss, iteration)
                writer.add_scalar("data/gradient_penalty", gradient_penalty,
                                  iteration)

        # 2. Update net_g (the generator)
        for p in net_d.parameters():
            p.requires_grad_(False)
        for i in range(GENER_ITERS):
            with timer("Generator iters: " + str(iteration) + "/" + str(i)):
                net_g.zero_grad()
                noise = gen_rand_noise()
                noise.requires_grad_(True)
                fake_data = net_g(noise)

                # Define the generator loss
                g_loss = net_d(fake_data).mean()
                # Update parameters
                g_loss.backward(mone)
                optimizer_g.step()

            # Record "g_loss"
            record_g = [g_loss.data]
            record_g = [float(r) for r in record_g]
            reporter_g.recive(record_g)
            if i == GENER_ITERS - 1:
                reporter_g.report(iteration)
                writer.add_scalar("data/d_loss", -g_loss, iteration)

        # Save intermediate models and images
        if iteration % 200 == 199:
            v = (iteration + 1) / 200
            gen_images = net_g(
                fixed_noisev) * 0.5 + 0.5  # Scale from -1 ~ 1 to 0 ~ 1
            torchvision.utils.save_image(gen_images,
                                         os.path.join(
                                             IMAGES_PATH,
                                             "samples_{}.png".format(v)),
                                         nrow=BATCH_SIZE,
                                         padding=2)
            grid_images = torchvision.utils.make_grid(gen_images,
                                                      nrow=8,
                                                      padding=2)
            writer.add_image('images', grid_images, iteration)
            torch.save(
                net_d,
                os.path.join(MODEL_PATH, "discriminator_{}.pkg".format(v)))
            torch.save(net_g,
                       os.path.join(MODEL_PATH, "generator_{}.pkg".format(v)))
Code example #10
def extract_features():
    torch.multiprocessing.set_start_method('spawn', force=True)
    ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    with open(r'{}/args.yaml'.format(ROOT_DIR)) as file:
        args_list = yaml.load(file, Loader=yaml.FullLoader)

    emb_dim = args_list['emb_dim']
    model_name = args_list['model_name']
    triplet_method = args_list['triplet_method']
    run_name = args_list['run_name']
    num_workers = args_list['num_workers']
    n_classes_test = args_list['n_classes_test']
    n_samples_test = args_list['n_samples_test']
    data_type = args_list['data']

    parser = argparse.ArgumentParser()
    parser.add_argument('--triplet_method',
                        '--tm',
                        type=str,
                        default=triplet_method,
                        help='triplet method (default: "batch_hard")')
    parser.add_argument('--run_name',
                        '--rn',
                        type=str,
                        default=run_name,
                        help='The name for this run (default: "Run01-hardv2")')
    parser.add_argument('--data_type',
                        '--data',
                        type=str,
                        default=data_type,
                        help='the data source')

    args = parser.parse_args()
    triplet_method = args.triplet_method
    run_name = args.run_name
    data_type = args.data_type
    pooling = False
    with open('{}/dataset.json'.format(ROOT_DIR), 'r') as fp:
        dataset = json.load(fp)
    partition = dataset['partition']
    labels = dataset['labels']
    # 'data_c1','data_c2','data_complex_c1','data_complex_c2','data_resized','data_complex_resized'
    # 'data_c1v2','data_c2v2','data_complex_c1v2','data_complex_c2v2','data_resizedv2','data_complex_resizedv2'

    if data_type in [
            'data_resizedv2', 'data_complex_resizedv2', 'data_c1v2',
            'data_complex_c1v2', 'data_c2v2', 'data_complex_c2v2'
    ]:
        pooling = True
        n_classes_test = 8
    data_x = partition['train'] + partition['validation'] + partition['test']
    data_y = {**labels['train'], **labels['validation'], **labels['test']}
    test_dataset = PairLoader(data_x, data_y, data_source=data_type)
    test_batch_sampler = BalanceBatchSampler(dataset=test_dataset,
                                             n_classes=n_classes_test,
                                             n_samples=n_samples_test)
    test_loader = DataLoader(test_dataset,
                             batch_sampler=test_batch_sampler,
                             num_workers=num_workers)
    model_filename = Reporter(
        ckpt_root=os.path.join(ROOT_DIR, 'ckpt'),
        exp=triplet_method,
        monitor='acc').select_best(run=run_name).selected_ckpt
    print(model_filename)
    model = models.modelTriplet(embedding_dimension=emb_dim,
                                model_architecture=model_name,
                                pooling=pooling)
    model.to(device)
    model.load_state_dict(torch.load(model_filename)['model_state_dict'])
    model.eval()
    with torch.no_grad():
        emb, tg = [], []
        for batch_idx, (data, target) in enumerate(test_loader):
            data = data.to(device)  # move the batch to the model's device
            embedding = model(data).cpu().numpy()
            target = target.cpu().numpy()
            emb.append(embedding)
            tg.append(target)

        features = {'emb': emb, 'tg': tg}
        with open('features_1.json', 'w') as f_out:
            json.dump(features, f_out, cls=NumpyEncoder)
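
extract_features serializes lists of numpy arrays through a custom NumpyEncoder, which is defined elsewhere in the project. A common pattern for such an encoder (assumed here, not necessarily the project's version) subclasses json.JSONEncoder and converts numpy types to plain Python ones:

import json
import numpy as np

class NumpyEncoder(json.JSONEncoder):
    # Convert numpy arrays and scalars to JSON-serializable Python types.
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        return super().default(obj)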