Example #1
import os

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import trange

# FLAGS (absl-style), ImgCOCO, collate_fn, RNNModel, print_parameters and
# vocab_size are assumed to be defined elsewhere in the project.


def train():

    os.makedirs(os.path.join(FLAGS.logdir, 'sample'))
    writer = SummaryWriter(os.path.join(FLAGS.logdir))

    with open(os.path.join(FLAGS.logdir, "flagfile.txt"), 'w') as f:
        f.write(FLAGS.flags_into_string())

    writer.add_text("flagfile",
                    FLAGS.flags_into_string().replace('\n', '  \n'))

    dataset = ImgCOCO()

    train_dataloader = DataLoader(dataset,
                                  batch_size=FLAGS.batch_size,
                                  shuffle=True,
                                  collate_fn=collate_fn)

    model = RNNModel(hidden_size=FLAGS.hidden_dim, num_layers=FLAGS.num_layers)
    print_parameters(model)

    model = model.cuda()

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=FLAGS.lr)

    step = 0
    for epoch in range(FLAGS.total_epochs):
        print('[Epoch %d]' % epoch)
        # one progress tick per batch
        with trange(len(train_dataloader), dynamic_ncols=True) as pbar:
            for batch in train_dataloader:
                input_ids, label_ids = batch

                input_ids = input_ids.cuda()
                label_ids = label_ids.cuda()

                output = model(input_ids)
                loss = criterion(output.view(-1, vocab_size),
                                 label_ids.flatten())

                model.zero_grad()
                loss.backward()
                optimizer.step()

                writer.add_scalar("loss", loss.item(), step)
                writer.add_scalar("perplexity", torch.exp(loss).item(), step)

                step += 1

                pbar.set_postfix(loss="%.4f" % loss)
                pbar.update(1)
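
Every example on this page calls a project-local print_parameters helper, and its signature varies by project (some pass only the model, others a label as well). As a rough sketch of what such a helper typically does for a torch.nn.Module (an illustration, not any of these projects' actual implementation):

def print_parameters(model, name=''):
    # count all parameters and the trainable subset
    total = sum(p.numel() for p in model.parameters())
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('%s total parameters: %d (trainable: %d)' % (name, total, trainable))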
Example #2
# args, dataloader, dataset, Subword_STE, utils, DEVICE, PATH and train are
# assumed to be defined at module level elsewhere in the project.
def main():
    params = {'batch_size': args.batch_size,
              'shuffle': True,
              'num_workers': 1}

    train_generator, test_generator, vocab, vocab_tokens = dataloader.load_sentimix_tokens(
        params, binary=True, use_balanced_loader=False, language=args.language)

    train_generator = dataset.DataLoaderDevice(train_generator, DEVICE)
    test_generator = dataset.DataLoaderDevice(test_generator, DEVICE)

    model = Subword_STE(vocab_size=len(vocab),
                        vocab_size_tokens=len(vocab_tokens),
                        embedding_dim=args.emb_dim,
                        embedding_dim_tokens=args.emb_dim_tokens,
                        hidden_dim=args.hidden_dim,
                        hidden_dim_tokens=args.hidden_dim_tokens,
                        dropout_prob=args.dropout_prob,
                        share_emb=False)

    utils.initialize_model_(model)

    print("Language: ", args.language)
    print("Vocab size: %d" % len(vocab))
    print("Tokens: %s" % " ".join(vocab_tokens._i2w))

    # note: both vocabularies are written to PATH; if save_to_file does not
    # derive distinct file names, the second call overwrites the first
    vocab.save_to_file(PATH)
    vocab_tokens.save_to_file(PATH)

    if args.print_stats:
        utils.print_parameters(model)
        print('Data loaded!\nTrain batches: %d\nTest batches: %d\nVocab size: %d\nNumber of labels: %d' %
              (len(train_generator), len(test_generator), len(vocab), vocab.num_labels))

        utils.print_dataset_statistics(train_generator)
        print(model)

    model = model.to(DEVICE)

    train(model, train_generator, test_generator, vocab, vocab_tokens)
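
dataset.DataLoaderDevice is project-local; judging from how it is used here, it wraps a DataLoader and moves every tensor in each batch onto the target device. A minimal sketch under that assumption (not the project's actual code):

import torch

class DataLoaderDevice:
    def __init__(self, loader, device):
        self.loader = loader
        self.device = device

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        # move tensors to the device, pass anything else through unchanged
        for batch in self.loader:
            yield tuple(x.to(self.device) if torch.is_tensor(x) else x
                        for x in batch)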
Example #3
    # utils, dcgan, args, cuda, Tensor, StepLR, Variable and np are assumed to
    # be imported/defined at module level in the original file.
    def __init__(self):

        # load images and scale in range (-1, 1)
        self.images_loader = utils.load_images('datasets/patterns/')
        # dataset = datasets.CIFAR10(root='./cifar10', train=True, download=True, transform=transform)
        # dataloader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=True, num_workers=20, drop_last=True)

        # Define the network
        self.generator = utils.load_network('generator', dcgan.Generator())
        self.discriminator = utils.load_network('discriminator',
                                                dcgan.Discriminator())

        # print total and trainable parameters for networks
        utils.print_parameters(self.generator, 'Generator')
        utils.print_parameters(self.discriminator, 'Discriminator')

        self.adversarial_loss = (torch.nn.BCELoss().cuda()
                                 if cuda else torch.nn.BCELoss())

        # set up optimisers
        self.optimizer_D = torch.optim.RMSprop(self.discriminator.parameters(),
                                               lr=args.lr_d)
        self.optimizer_G = torch.optim.RMSprop(self.generator.parameters(),
                                               lr=args.lr_g)

        # set up LR schedulers
        self.scheduler_D = StepLR(self.optimizer_D,
                                  step_size=args.lr_step,
                                  gamma=args.lr_gamma)
        self.scheduler_G = StepLR(self.optimizer_G,
                                  step_size=args.lr_step,
                                  gamma=args.lr_gamma)

        # create latent vectors to visualize the progression of the generator
        # (Variable is a no-op since PyTorch 0.4 and is kept here only for
        # backward compatibility)
        self.fixed_noise = Variable(
            Tensor(np.random.normal(0, args.s_sd,
                                    (args.batch_size, args.latent_dim))))
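
fixed_noise is the standard trick for monitoring GAN training: pushing the same latent vectors through the generator at regular intervals makes successive samples directly comparable. A typical consumer of this attribute might look like the sketch below (the method name and output path are assumptions, not part of this example):

import torch
import torchvision.utils as vutils

def save_samples(self, epoch):
    # reuse the same latent vectors every time for comparable snapshots
    self.generator.eval()
    with torch.no_grad():
        fake = self.generator(self.fixed_noise)
    vutils.save_image(fake, 'samples/epoch_%03d.png' % epoch, normalize=True)
    self.generator.train()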
Example #4
import pickle

import numpy as np

# utils, HAMILTONIAN, MODEL, make_file_name and b2_array are assumed to be
# defined elsewhere in the project.


def main():

    # Reading parameters from para.dat file
    parameters = utils.read_parameter_file()
    
    # Printing parameters for user
    utils.print_parameters(parameters)

    # Defining Hamiltonian
    H = HAMILTONIAN(**parameters)

    # Defines the model, and precomputes evolution matrices given set of states
    model = MODEL(H, parameters)
    
    L = 6
    T = 0.1
    n_step = 28
    param = {'L': L, 'T': T, 'n_step': n_step}
    file_name = make_file_name(param, root="/projectnb/fheating/SGD/ES/dynamicQL/SA/ES/data/")

    with open(file_name, 'rb') as f:
        fidelities = pickle.load(f)

    nfid = fidelities.shape[0]
    # np.float was removed in NumPy 1.24; use np.float64 instead
    fid_and_energy = np.empty((nfid, 2), dtype=np.float64)

    for i, f in zip(range(nfid), fidelities):
        if i % 10000 == 0:
            print(i)
        model.update_protocol(b2_array(i, w=28))
        psi = model.compute_evolved_state()
        fid_and_energy[i][0] = model.compute_fidelity(psi_evolve=psi)
        fid_and_energy[i][1] = model.compute_energy(psi_evolve=psi)
        print(fid_and_energy[0], '\t', f)
        break

    with open("ES_L-06_T-0.500_n_step-28-test.pkl", ‘wb’) as f:
	    fidelities=pickle.dump(fid_and_energy,f, protocol=4)
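
b2_array(i, w=28) evidently encodes the integer protocol index i as a width-28 binary array, so iterating i over range(nfid) enumerates every 28-step protocol. A plausible reconstruction assuming NumPy (not the project's actual helper):

import numpy as np

def b2_array(i, w=28):
    # e.g. b2_array(5, w=8) -> array([0, 0, 0, 0, 0, 1, 0, 1])
    return np.array(list(np.binary_repr(i, width=w)), dtype=int)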
Example #5
    # utils, waae_resnet, args, cuda, itertools, StepLR, Variable, Tensor and
    # np are assumed to be imported/defined at module level in the original file.
    def __init__(self):

        # load images and scale in range (-1, 1)
        self.images_loader = utils.load_images('datasets/patterns/')
        # dataset = datasets.CIFAR10(root='./cifar10', train=True, download=True, transform=transform)
        # dataloader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=True, num_workers=20, drop_last=True)

        # load networks (only defined for the 'ResNet' architecture; any other
        # value of args.architecture would leave these attributes unset)
        if args.architecture == 'ResNet':
            self.encoder = utils.load_network('encoder', waae_resnet.Encoder())
            self.decoder = utils.load_network('decoder', waae_resnet.Decoder())
            self.discriminator = utils.load_network(
                'discriminator', waae_resnet.Discriminator())

        # print total and trainable parameters for networks
        utils.print_parameters(self.encoder, 'Encoder')
        utils.print_parameters(self.decoder, 'Decoder')
        utils.print_parameters(self.discriminator, 'Discriminator')

        self.reconstruct_loss = (torch.nn.MSELoss().cuda()
                                 if cuda else torch.nn.MSELoss())

        # set up optimizers
        self.optimizer_R = torch.optim.Adam(
            itertools.chain(self.encoder.parameters(),
                            self.decoder.parameters()),
            lr=args.lr_R,
            betas=(args.b1, args.b2))
        self.optimizer_D = torch.optim.Adam(self.discriminator.parameters(),
                                            lr=args.lr_D,
                                            betas=(args.b1, args.b2))
        #self.optimizer_D = torch.optim.SGD(self.discriminator.parameters(), lr=args.lr_D, momentum=0.9, dampening=0, weight_decay=1e-4)
        self.optimizer_G = torch.optim.Adam(self.encoder.parameters(),
                                            lr=args.lr_G,
                                            betas=(args.b1, args.b2))

        # set up LR schedulers
        self.scheduler_R = StepLR(self.optimizer_R,
                                  step_size=args.lr_step,
                                  gamma=args.lr_gamma)
        self.scheduler_D = StepLR(self.optimizer_D,
                                  step_size=args.lr_step,
                                  gamma=args.lr_gamma)
        self.scheduler_G = StepLR(self.optimizer_G,
                                  step_size=args.lr_step,
                                  gamma=args.lr_gamma)

        # create batch of latent vectors to visualize the progression of the generator
        self.fixed_noise = Variable(
            Tensor(np.random.normal(0, args.s_sd, (100, args.latent_dim))))
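
With three optimizers sharing one StepLR schedule, the usual pattern is to advance all three schedulers once per epoch from the training loop. A sketch of such an end-of-epoch hook (the method name is an assumption):

    def end_of_epoch(self):
        # decay all three learning rates on the shared StepLR schedule
        self.scheduler_R.step()
        self.scheduler_D.step()
        self.scheduler_G.step()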
Example #6
# torch, autoencoder, sacade_rnn, classifier, Cel, voc, train_loader,
# optimizerGru, optimizerClas, DEVICE, LEARNING_RATE, BATCH_SIZE and
# NB_EPOCHS are assumed to be defined earlier in the original script.
optimizerAuto = torch.optim.Adam(autoencoder.parameters(), lr=LEARNING_RATE)

sacade_rnn = sacade_rnn.to(DEVICE)
classifier = classifier.to(DEVICE)
autoencoder = autoencoder.to(DEVICE)
Cel = Cel.to(DEVICE)

# ##########
# Training #
# ##########
print("torch version : ", torch.__version__)
print("Device : ", DEVICE)
print("Nombre d'eleves : ", voc.num_user)
# print("Nombre d'eleve reduit : ", voc.user2index.keys())

print_parameters(sacade_rnn)
print_parameters(classifier)
print_parameters(autoencoder)

# , Valid : {len(valid_loader)} ")
print(f"Train : {len(train_loader)*BATCH_SIZE}")

for i_epoch in range(NB_EPOCHS):
    for i_batch, batch in enumerate(train_loader):
        # print(i_batch)
        optimizerGru.zero_grad()
        optimizerClas.zero_grad()

        sessions, lengths, userids = batch
        sessions = sessions.to(DEVICE)
        userids = userids.to(DEVICE)
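
The batch unpacks into padded sessions plus their true lengths, which suggests the RNN consumes a packed sequence. A hedged sketch of the next step in the loop (whether sacade_rnn accepts packed input is an assumption):

        # (sketch) pack by true lengths so padding does not pollute the GRU state
        from torch.nn.utils.rnn import pack_padded_sequence
        packed = pack_padded_sequence(sessions, lengths.cpu(),
                                      batch_first=True, enforce_sorted=False)
        output, hidden = sacade_rnn(packed)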