Example #1
0
def train(meta_files):
    """Train an LBPH face recognizer and report train/val/test accuracy.

    Args:
        meta_files: sequence of at least three dataset meta-file paths
            (train, validation, test) consumed by load_batch/eval_model.

    Side effects: creates config.OUTPUT_DIR if missing and saves the trained
    model there as config.OUTPUT_MODEL_FILE.
    """
    if not os.path.exists(config.OUTPUT_DIR):
        os.mkdir(config.OUTPUT_DIR)

    # LBPH chosen over the Eigen/Fisher variants available in cv2.face.
    recognizer = cv2.face.LBPHFaceRecognizer_create()

    print('\nProcessing...')

    [list_img, list_label], num_sample = load_batch(meta_files[0])
    if num_sample < 1:
        # Nothing to train on; bail out before cv2 raises on an empty set.
        return

    recognizer.train(list_img, np.array(list_label))
    train_accuracy = eval_model(recognizer, meta_files[0])
    val_accuracy = eval_model(recognizer, meta_files[1])
    test_accuracy = eval_model(recognizer, meta_files[2])

    # Fix: validation accuracy was computed but never reported.
    print(
        '\nRESULT: Number of images: %d, Train accuracy: %g, '
        'Validation accuracy: %g, Test accuracy: %g'
        % (num_sample, train_accuracy, val_accuracy, test_accuracy))

    recognizer.save(os.path.join(config.OUTPUT_DIR, config.OUTPUT_MODEL_FILE))
Example #2
0
def train(meta_files):
    """Train an LBPH face recognizer and report train/val/test accuracy.

    Args:
        meta_files: sequence of three meta-file paths (train, val, test).
            If fewer than three are given, the default dataset meta is used.

    Side effects: creates config.OUTPUT_DIR if missing and writes the model
    to config.OUTPUT_MODEL_FILE inside it.
    """
    if len(meta_files) < 3:
        meta_files = idm.init_dataset_meta()
    if not os.path.exists(config.OUTPUT_DIR):
        os.mkdir(config.OUTPUT_DIR)

    recognizer = cv2.face.LBPHFaceRecognizer_create()

    [list_img, list_label], num_sample = load_batch(meta_files[0])
    if num_sample < 1:
        print('Err: 0 sample found')
        # Fix: previously fell through and crashed in recognizer.train()
        # with an empty training set.
        return

    recognizer.train(list_img, np.array(list_label))

    train_accuracy = eval_model(recognizer, meta_files[0])
    val_accuracy = eval_model(recognizer, meta_files[1])
    test_accuracy = eval_model(recognizer, meta_files[2])

    print('Train accuracy =  %f' % (train_accuracy))
    print('Test accuracy = %f' % (test_accuracy))
    print('Validate accuracy = %f' % (val_accuracy))

    recognizer.write(os.path.join(config.OUTPUT_DIR, config.OUTPUT_MODEL_FILE))
Example #3
0
def run(n_epochs):
    """Train the VAE for n_epochs, logging losses to TensorBoard and saving
    the best model (lowest test loss) to save_dir/vae.pkl.

    Relies on module-level globals: model, optimizer, dataset, writer,
    helpers, batch_size, img_size, nc, nz, log_interval, print_output,
    save_dir.
    """
    best_test_loss = np.inf  # lowest test loss seen so far

    for epoch in range(n_epochs):

        for n, images in enumerate(dataset.train_loader):
            niter = epoch * len(dataset.train_loader) + n  # count gradient updates

            # training
            model.train()
            images = Variable(images).cuda()
            optimizer.zero_grad()
            output, mu, log_var = model(images)
            loss = helpers.vae_loss(output, images, mu=mu, logvar=log_var,
                                    batch_size=batch_size, img_size=img_size, nc=nc)
            loss.backward()
            optimizer.step()
            # NOTE(review): loss.data[0] is the legacy (pre-0.4 PyTorch) way
            # to read a scalar; kept for consistency with the rest of this file.
            train_loss = loss.data[0]
            writer.add_scalar('Loss/Train', train_loss, niter)

        if epoch % log_interval == 0:

            # testing
            model.eval()
            test_loss = 0
            for n, images in enumerate(dataset.test_loader):
                images = Variable(images).cuda()
                output, mu, log_var = model(images)
                test_loss += helpers.vae_loss(output, images, mu, log_var, batch_size, img_size, nc).data[0]
            test_loss /= len(dataset.test_loader)   # average over all iterations
            writer.add_scalar('Loss/Test', test_loss, epoch)

            # checkpoint whenever the averaged test loss improves
            if test_loss < best_test_loss:
                best_test_loss = test_loss
                torch.save(model.state_dict(), '{}/vae.pkl'.format(save_dir))
                writer.add_text('best epoch', 'saved model at epoch {}'.format(epoch), 0)

            if print_output:
                # Fix: originally referenced the undefined global `num_epochs`
                # instead of the `n_epochs` parameter.
                print("Epoch [{}/{}], Gradient Step: {}, Train Loss: {:.4f}, Test Loss: {:.4f}"
                    .format(epoch, n_epochs, (epoch + 1) * len(dataset.train_loader), train_loss, test_loss))

            # inspect reconstruction quality
            n_samples = 4
            data = dataset.load_batch(n_samples)  # Get a batch of test data
            # volatile=True (legacy autograd) marks this as inference-only:
            # no gradient buffers are allocated for the forward pass.
            input = Variable(data, volatile=True).cuda()
            output, _, _ = model(input)
            noise = Variable(torch.randn(n_samples, nz, 1, 1).cuda())
            generations = model.generator(noise)

            # one grid: originals | reconstructions | samples from the prior
            sequence = torch.cat((input.data.cpu(), output.data.cpu(), generations.data.cpu()), 0)
            grid = helpers.convert_image_np(torchvision.utils.make_grid(sequence, n_samples))

            writer.add_image('Input, Reconstruction, Generation', grid, epoch)
Example #4
0
def eval_model(recognizer, meta_file):
    """Return the recognizer's classification accuracy on meta_file's samples.

    Args:
        recognizer: trained cv2 face recognizer exposing predict().
        meta_file: dataset meta-file path consumed by load_batch.

    Returns:
        Fraction of correctly predicted samples in [0, 1]; 0 when the meta
        file yields no samples.
    """
    [images, labels], total = load_batch(meta_file)
    if total < 1:
        return 0

    # predict() returns (label, confidence); only the label matters here.
    correct = sum(
        recognizer.predict(img)[0] == label
        for img, label in zip(images, labels))
    return float(correct) / total
Example #5
0
def eval_model(recognizer, meta_file):
    """Return the recognizer's accuracy on the samples listed in meta_file.

    Args:
        recognizer: trained cv2 face recognizer exposing predict().
        meta_file: dataset meta-file path consumed by load_batch.

    Returns:
        Fraction of correctly predicted samples in [0, 1]; 0 when meta_file
        is missing or contains no samples.
    """
    if not os.path.exists(meta_file):
        # Fix: message typo ("exits") and a missing early return — the
        # original fell through to load_batch with a nonexistent path.
        print(meta_file, "doesn't exist!")
        return 0

    [list_img, list_label], num_sample = load_batch(meta_file)

    if num_sample < 1:
        return 0

    check = 0
    for i in range(num_sample):
        prediction, _ = recognizer.predict(list_img[i])
        check += prediction == list_label[i]
    return float(check) / num_sample
Example #6
0
def train(self, epochs, batch_size=1, sample_interval=50):
        """Run CycleGAN adversarial training for `epochs` epochs.

        Args:
            epochs: number of passes over the dataset.
            batch_size: requested batch size. NOTE(review): immediately
                overridden to 1 below — confirm whether that is intended.
            sample_interval: unused in this body; presumably consumed by a
                sibling sampling routine.
        """
        gan = CycleGan()
        start_time = datetime.datetime.now()
        batch_size = 1
        # Adversarial ground truths; the discriminator emits a (16, 16, 1)
        # patch map per image.
        valid = np.ones((batch_size, 16, 16, 1))
        # Fix: the "fake" target must share the discriminator output shape
        # used for `valid` ((…, 1), not (…, 2)), or train_on_batch fails.
        fake = np.zeros((batch_size, 16, 16, 1))

        # Fix: syntax error in the original — "range(epochs):)".
        for epoch in tqdm(range(epochs)):
            for batch_i, (imgs_A, imgs_B) in enumerate(dataset.load_batch(batch_size)):

                # Translate each batch to the opposite domain.
                fake_B = gan.g_AB.predict(imgs_A)
                fake_A = gan.g_BA.predict(imgs_B)

                # Train discriminator A on real vs. translated samples.
                dA_loss_real = gan.d_A.train_on_batch(imgs_A, valid)
                dA_loss_fake = gan.d_A.train_on_batch(fake_A, fake)
                dA_loss = 0.5 * np.add(dA_loss_real, dA_loss_fake)

                # Train discriminator B likewise.
                dB_loss_real = gan.d_B.train_on_batch(imgs_B, valid)
                dB_loss_fake = gan.d_B.train_on_batch(fake_B, fake)
                dB_loss = 0.5 * np.add(dB_loss_real, dB_loss_fake)

                d_loss = 0.5 * np.add(dA_loss, dB_loss)

                # Train the generators (adversarial + cycle + identity targets).
                g_loss = gan.combined.train_on_batch([imgs_A, imgs_B],
                                                     [valid, valid,
                                                      imgs_A, imgs_B,
                                                      imgs_A, imgs_B])

                elapsed_time = datetime.datetime.now() - start_time

                # NOTE(review): len(dataset.load_batch()) re-invokes the loader
                # just to report a batch count — confirm this is cheap.
                print("[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %3d%%] [G loss: %05f, adv: %05f, recon: %05f, id: %05f] time: %s "
                      % (epoch, epochs,
                         batch_i, len(dataset.load_batch()),
                         d_loss[0], 100 * d_loss[1],
                         g_loss[0],
                         np.mean(g_loss[1:3]),
                         np.mean(g_loss[3:5]),
                         np.mean(g_loss[5:6]),
                         elapsed_time))
Example #7
0
        # NOTE(review): fragment of a larger GAN/VAE training loop; the
        # enclosing function header and earlier iteration body are outside
        # this view, so globals (writer, epoch, netE, netG, etc.) are assumed
        # to be defined there.
        writer.add_scalar('Score/Fake', fake_score, niter)
        writer.add_scalar('Instance Noise', noise_level, niter)

    # Periodic logging / visualization block, run every log_interval epochs.
    if epoch % log_interval == 0:
        if print_output:
            print(
                "Epoch [{}/{}], Step [{}/{}], Loss_g: {:.4f}, Loss_d: {:.4f}, Real Score: {:.2f}, Fake Score: {:.2f}"
                .format(epoch, num_epochs, n, num_batches, g_loss, d_loss,
                        real_score, fake_score))
            print("VAE loss: {}".format(vae_loss))

        # Switch encoder and generator to eval mode for visualization.
        n_samples = 4
        netE.eval()
        netG.eval()

        data = Variable(dataset.load_batch(n_samples)).cuda()
        reconstructions, _, _ = reconstruct(data)

        originals = data.data.cpu()
        # NOTE(review): duplicated assignment ("inputs = inputs = ...") is
        # harmless but looks like a typo — confirm intent.
        inputs = inputs = helpers.add_gaussian_noise(data,
                                                     noise_level).data.cpu()
        fake_input = helpers.add_gaussian_noise(reconstructions,
                                                noise_level).data.cpu()
        reconstructions = reconstructions.data.cpu()
        generations = sample(n_samples).view(-1, nc, img_size,
                                             img_size).data.cpu()

        # Stack originals, noisy inputs, noisy fakes, reconstructions and
        # prior samples into one image grid (presumably for TensorBoard).
        sequence = torch.cat(
            (originals, inputs, fake_input, reconstructions, generations), 0)
        grid = helpers.convert_image_np(
            torchvision.utils.make_grid(sequence, n_samples))