Example #1
def g_train(g_model, d_model, g_optim, loss_function, real_labels, writer,
            args, git):
    """
    Training loop of generator
    """
    Z = generate_z(args).to(args.device)

    if not args.unpac:
        # Pack pairs of generated samples along the channel dimension
        # (PacGAN-style) so the discriminator scores pairs, not singletons
        fake = torch.cat(torch.split(g_model(Z), args.batch_size // 2, dim=0),
                         1)
    else:
        fake = g_model(Z)
    d_fake = d_model(fake)

    if loss_function is None:
        g_loss = -torch.mean(d_fake)
    else:
        g_loss = loss_function(d_fake, real_labels)

    writer.add_scalar('g_loss', g_loss, git)

    g_optim.zero_grad()
    g_loss.backward()
    g_optim.step()
    summarize_grad(g_model, writer, git, 'g')

    return g_loss
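
When args.unpac is false, the generator output is packed before being scored: the batch is split into two halves that are concatenated along the channel dimension, so the discriminator sees pairs of samples (a PacGAN-style packing). A minimal shape check, assuming a batch of 8 single-channel 32^3 volumes as a stand-in for g_model(Z):

import torch

batch_size = 8
fake = torch.randn(batch_size, 1, 32, 32, 32)  # stand-in for g_model(Z)

# Split the batch into two halves of 4 and stack them channel-wise:
packed = torch.cat(torch.split(fake, batch_size // 2, dim=0), 1)

print(fake.shape)    # torch.Size([8, 1, 32, 32, 32])
print(packed.shape)  # torch.Size([4, 2, 32, 32, 32])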
Example #2
def evolve():
    '''
    Evolve the view using the requested evolutions:

    If the evolution is specified as simple, use a 1:1
    implementation of the original DeepIE.

    Otherwise, evolve based on the user's specification.

    Some sort of novelty search is performed if novelty=True.
    Behavioral novelty search can be toggled in Constants, but is very slow.

    The response is 9 Z vectors.
    '''
    evolved, body = [], {}
    request_json = request.get_json()
    evolution_specifications = request_json['specifications']
    novelty = request_json['novelty']
    mutation_rate = request_json['mutation']

    if 'SIMPLE' in evolution_specifications:
        selected_canvases = []
        for i in range(int(evolution_specifications[6])):
            selected_canvases.append(request_json[f'selected{i}'])
        zs = [request_json[f'z{i}'] for i in range(9)]
        evolved = simple_evolution(
            selected_canvases, zs, G, novelty, BEHAVIORAL, mutation_rate)
    else:
        evolution_specifications = evolution_specifications.split(',')
        for specification in evolution_specifications:
            if specification[0] == 'M':  # mutate the referenced latent
                evolved.append(
                    mutate(request_json[f'z{specification[2]}'], mutation_rate))
            elif specification[0] == 'K':  # keep it unchanged
                evolved.append(request_json[f'z{specification[2]}'])
            elif specification[0] == 'N' and not novelty:  # new random latent
                evolved.append(generate_z().tolist())
            elif specification[0] == 'C':  # crossover of two latents
                evolved.append(crossover(
                    request_json[f'z{specification[2]}'], request_json[f'z{specification[5]}']))

        if novelty:
            evolved = behavioral_novelty_search(
                evolved, G) if BEHAVIORAL else novelty_search(evolved)

    for i in range(len(evolved)):
        body[f'z{i}'] = evolved[i]

    return jsonify(body)
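
The non-simple branch dispatches on comma-separated specification tokens: 'M' mutates the referenced latent, 'K' keeps it, 'N' requests a fresh random latent (left to the novelty search when novelty=True), and 'C' crosses over two latents. The exact token grammar is not shown in this excerpt; the code only reads the operation letter at index 0 and canvas digits at index 2 (and index 5 for the second crossover parent). A hypothetical payload that satisfies those positions:

# Hypothetical request body; the token spellings are an assumption that
# merely satisfies the character positions read by evolve() (0, 2 and 5).
payload = {
    'specifications': 'M:0,K:1,N,C:2->5',
    'novelty': False,
    'mutation': 0.1,
    # 'z0' ... 'z8' would carry the nine current latent vectors
}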
Example #3
def initialize_single():
    '''
    Initialize the view:

    Respond with the coordinates of nonzero voxels, the Z vector and
    camera placement information
    '''
    model_type = request.get_json()['model_type']
    body = {}
    z = generate_z()
    voxels = G.generate(z, model_type)
    coords = create_coords_from_voxels(voxels)
    body['z'] = z.tolist()
    body['coords'] = coords
    body['camera'] = CAMERA_PLANE if model_type == 'Plane' else CAMERA_CHAIR

    return jsonify(body)
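
A hypothetical client call for this handler (the route path is not visible in this excerpt and is assumed below):

import requests

# '/initialize_single' is an assumed route name for illustration only
resp = requests.post('http://localhost:5000/initialize_single',
                     json={'model_type': 'Plane'})
body = resp.json()
print(len(body['z']), len(body['coords']), body['camera'])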
Example #4
    def train(self):
        self.train_hist = {}
        self.train_hist['D_loss'] = []
        self.train_hist['G_loss'] = []
        self.train_hist['E_loss'] = []
        self.train_hist['per_epoch_time'] = []
        self.train_hist['total_time'] = []

        if torch.cuda.is_available():
            self.y_real_, self.y_fake_ = Variable(
                torch.ones(self.batch_size, 1).cuda()), Variable(
                    torch.zeros(self.batch_size, 1).cuda())
        else:
            self.y_real_, self.y_fake_ = Variable(
                torch.ones(self.batch_size,
                           1)), Variable(torch.zeros(self.batch_size, 1))

        self.D.train()
        print('training start!!')
        start_time = time.time()

        #check_point =255
        #self.load(check_point)
        #print("loading " + str(check_point))
        for epoch in range(0, self.epoch):
            # reset training mode of G and E
            # print("Learning rate decay")
            self.G_optimizer.param_groups[0]['lr'] = self.args.lrG / np.sqrt(
                epoch + 1)
            self.D_optimizer.param_groups[0]['lr'] = self.args.lrD / np.sqrt(
                epoch + 1)
            # self.E_optimizer.param_groups[0]['lr'] = self.args.lrE / np.sqrt(epoch + 1)

            epoch_start_time = time.time()
            for iter, (X, _) in enumerate(self.train_loader):
                X = utils.to_var(X)
                """Discriminator"""
                z = utils.generate_z(self.batch_size, self.z_dim, self.prior)

                X_hat = self.G(z)
                D_real = self.D(self.FC(X))
                D_fake = self.D(self.FC(X_hat))

                # D_loss = self.BCE_loss(D_real, self.y_real_) + self.BCE_loss(D_fake, self.y_fake_)
                D_loss = -1.0 * torch.mean(D_real - torch.exp(D_fake - 1.0))

                self.train_hist['D_loss'].append(D_loss.data.item())
                # Optimize
                D_loss.backward()

                # gradient clipping
                torch.nn.utils.clip_grad_value_(
                    chain(self.D.parameters(), self.FC.parameters()), 1.0)

                self.D_optimizer.step()
                self.__reset_grad()
                """Encoder"""

                #z = utils.generate_z(self.batch_size, self.z_dim, self.prior)
                X_hat = self.G(z)
                z_mu, z_sigma = self.E(self.FC(X_hat))
                # negative log-likelihood; 0.9189 ~= 0.5 * log(2 * pi)
                E_loss = torch.mean(
                    torch.mean(
                        0.5 * (z - z_mu)**2 * torch.exp(-z_sigma) +
                        0.5 * z_sigma + 0.9189, 1))
                self.train_hist['E_loss'].append(E_loss.data.item())
                # Optimize
                E_loss.backward()
                self.E_optimizer.step()
                self.__reset_grad()
                """Generator"""
                # Use both Discriminator and Encoder to update Generator
                #z = utils.generate_z(self.batch_size, self.z_dim, self.prior)
                X_hat = self.G(z)
                D_fake = self.D(self.FC(X_hat))
                z_mu, z_sigma = self.E(self.FC(X_hat))
                mode_loss = torch.mean(
                    torch.mean(
                        0.5 * (z - z_mu)**2 * torch.exp(-z_sigma) +
                        0.5 * z_sigma + 0.9189, 1))

                # G_loss = self.BCE_loss(D_fake, self.y_real_)
                G_loss = -1.0 * torch.mean(
                    D_fake)  # non-saturating loss; avoids vanishing gradients

                total_loss = G_loss + mode_loss
                self.train_hist['G_loss'].append(G_loss.data.item())
                # Optimize
                total_loss.backward()

                # gradient clipping
                torch.nn.utils.clip_grad_value_(self.G.parameters(), 1.0)

                self.G_optimizer.step()
                self.__reset_grad()
                """Plot"""
                if (iter + 1) == len(self.train_loader.dataset) // self.batch_size:
                    print(
                        'Epoch-{}; D_loss: {:.4}; G_loss: {:.4}; Mode_loss: {:.4}\n'
                        .format(epoch, D_loss.data.item(), G_loss.data.item(),
                                mode_loss.data.item()))

                    self.visualize_results(epoch + 1)
                    self.train_hist['per_epoch_time'].append(time.time() -
                                                             epoch_start_time)

                    if (epoch + 1) % 20 == 0:
                        print("Save Model")
                        self.save(epoch)

                    break

        # Final save after the last epoch
        self.save(epoch)

        self.train_hist['total_time'].append(time.time() - start_time)
        #self.save(epoch)
        print("Avg one epoch time: %.2f, total %d epochs time: %.2f" %
              (np.mean(self.train_hist['per_epoch_time']), self.epoch,
               self.train_hist['total_time'][0]))
        print("Training finish!... save training results")
Example #5
    def train(self):
        self.train_hist = {}
        self.train_hist['D_loss'] = []
        self.train_hist['G_loss'] = []
        self.train_hist['E_loss'] = []
        self.train_hist['per_epoch_time'] = []
        self.train_hist['total_time'] = []

        if torch.cuda.is_available():
            self.y_real_, self.y_fake_ = Variable(
                torch.ones(self.batch_size, 1).cuda()), Variable(
                    torch.zeros(self.batch_size, 1).cuda())
        else:
            self.y_real_, self.y_fake_ = Variable(
                torch.ones(self.batch_size,
                           1)), Variable(torch.zeros(self.batch_size, 1))

        self.D.train()
        print('training start!!')
        start_time = time.time()
        for epoch in range(self.epoch):
            # reset training mode of G and E
            self.G.train()
            self.E.train()
            self.FC.train()
            epoch_start_time = time.time()
            E_err = []
            D_err = []
            G_err = []

            for iter, (X, _) in enumerate(self.train_loader):
                X = utils.to_var(X)
                """Discriminator"""
                z = utils.generate_z(self.batch_size, self.z_dim, self.prior)
                X_hat = self.G(z)
                D_real = self.D(self.FC(X))
                D_fake = self.D(self.FC(X_hat))
                D_loss = self.BCE_loss(D_real, self.y_real_) + self.BCE_loss(
                    D_fake, self.y_fake_)
                self.train_hist['D_loss'].append(D_loss.item())
                D_err.append(D_loss.item())
                # Optimize
                D_loss.backward()
                if self.grad_clip:
                    torch.nn.utils.clip_grad_norm_(
                        chain(self.D.parameters(), self.FC.parameters()),
                        self.grad_clip_val)
                self.D_optimizer.step()
                self.__reset_grad()

                # """Encoder"""
                # z = utils.generate_z(self.batch_size, self.z_dim, self.prior)
                # X_hat = self.G(z)
                # z_mu, z_sigma = self.E(self.FC(X_hat))
                # # - loglikehood
                # E_loss = torch.mean(torch.mean(0.5 * (z - z_mu) ** 2 * torch.exp(-z_sigma) + 0.5 * z_sigma + 0.5 * np.log(2*np.pi), 1))
                # self.train_hist['E_loss'].append(E_loss.data[0])
                # E_err.append(E_loss.data[0])
                # # Optimize
                # E_loss.backward()
                # self.E_optimizer.step()
                # self.__reset_grad()
                """Generator"""
                # Use both Discriminator and Encoder to update Generator
                z = utils.generate_z(self.batch_size, self.z_dim, self.prior)
                X_hat = self.G(z)
                D_fake = self.D(self.FC(X_hat))
                z_mu, z_sigma = self.E(self.FC(X_hat))
                E_loss = torch.mean(
                    torch.mean(
                        0.5 * (z - z_mu)**2 * torch.exp(-z_sigma) +
                        0.5 * z_sigma + 0.5 * np.log(2 * np.pi), 1))
                G_loss = self.BCE_loss(D_fake, self.y_real_)
                total_loss = G_loss + E_loss
                self.train_hist['G_loss'].append(G_loss.item())
                G_err.append(G_loss.item())
                E_err.append(E_loss.item())
                # Optimize
                total_loss.backward()
                if self.grad_clip:
                    torch.nn.utils.clip_grad_norm_(self.G.parameters(),
                                                   self.grad_clip_val)
                    torch.nn.utils.clip_grad_norm_(self.E.parameters(),
                                                   self.grad_clip_val)
                self.G_optimizer.step()
                self.E_optimizer.step()
                self.__reset_grad()
                """Plot"""
                if (iter + 1) == len(self.train_loader.dataset) // self.batch_size:
                    print(
                        'Epoch-{}; D_loss: {:.4}; G_loss: {:.4}; E_loss: {:.4}\n'
                        .format(epoch + 1, np.mean(D_err), np.mean(G_err),
                                np.mean(E_err)))

                    self.visualize_results(epoch + 1)

                    break

            self.train_hist['per_epoch_time'].append(time.time() -
                                                     epoch_start_time)

            # learning rate decay
            if self.lr_decay:
                rate = np.sqrt(epoch + 1) / np.sqrt(epoch + 2)
                self.G_optimizer.param_groups[0]['lr'] *= rate
                self.D_optimizer.param_groups[0]['lr'] *= rate
                self.E_optimizer.param_groups[0]['lr'] *= rate
                print("learning rate change!")

            # Save model
            if (epoch + 1) % 20 == 0:
                self.save()

        self.train_hist['total_time'].append(time.time() - start_time)
        print("Avg one epoch time: %.2f, total %d epochs time: %.2f" %
              (np.mean(self.train_hist['per_epoch_time']), self.epoch,
               self.train_hist['total_time'][0]))
        print("Training finish!... save training results")
        self.save()
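
The multiplicative decay above telescopes: after finishing epoch n (0-indexed), the learning rate is lr0 * prod_{e=0}^{n} sqrt(e+1)/sqrt(e+2) = lr0 / sqrt(n+2), which is exactly the lr0 / sqrt(epoch+1) schedule that Example #4 assigns directly at the start of each epoch. A quick numeric check:

import numpy as np

lr0, lr = 0.001, 0.001
for epoch in range(10):
    lr *= np.sqrt(epoch + 1) / np.sqrt(epoch + 2)
    # closed form of the telescoping product
    assert np.isclose(lr, lr0 / np.sqrt(epoch + 2))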
Example #6
def d_train(d_model, g_model, d_optim, loss_function, batch, writer, args):
    """
    Training loop of discriminator
    """
    if args.labels == 'noisy':
        real_labels = (torch.FloatTensor(args.batch_size //
                                         (1 if args.unpac else 2)).uniform_(
                                             0.85, 1.0)).to(args.device)
        fake_labels = (torch.FloatTensor(args.batch_size //
                                         (1 if args.unpac else 2)).uniform_(
                                             0.0, 0.15)).to(args.device)
    elif args.labels == 'hard':
        real_labels = torch.ones(args.batch_size //
                                 (1 if args.unpac else 2)).to(args.device)
        fake_labels = torch.zeros(args.batch_size //
                                  (1 if args.unpac else 2)).to(args.device)
    elif args.labels == 'd_hard':
        # one-sided label smoothing: real targets at 0.9, fake at 0.0
        real_labels = torch.FloatTensor([
            0.9 for _ in range(args.batch_size // (1 if args.unpac else 2))
        ]).to(args.device)
        fake_labels = torch.zeros(args.batch_size //
                                  (1 if args.unpac else 2)).to(args.device)

    if not args.unpac:
        X = torch.cat(torch.split(batch, args.batch_size // 2, dim=0),
                      1).view(-1, 2, args.cube_len, args.cube_len,
                              args.cube_len).to(args.device)
    else:
        X = batch.view(-1, 1, args.cube_len, args.cube_len,
                       args.cube_len).to(args.device)

    d_total_acc = 0.0
    it = 0

    while it < args.d_iter and d_total_acc < args.d_low_thresh:
        args.current_iteration = args.current_iteration + 1
        Z = generate_z(args).to(args.device)

        d_real = d_model(X)
        if loss_function is None:
            d_real_loss = -torch.mean(d_real)
        else:
            d_real_loss = loss_function(d_real, real_labels)

        if not args.unpac:
            fake = torch.cat(
                torch.split(g_model(Z), args.batch_size // 2, dim=0), 1)
        else:
            fake = g_model(Z)

        d_fake = d_model(fake)

        if loss_function is None:
            d_fake_loss = torch.mean(d_fake)
        else:
            d_fake_loss = loss_function(d_fake, fake_labels)

        if args.gan_type in ['wgan_gp']:
            grad_pen = calc_gradient_penalty(d_model, X, fake, args)
        else:
            grad_pen = 0.0

        d_loss = (d_fake_loss + d_real_loss) + grad_pen

        if args.gan_type in ['wgan_gp']:
            writer.add_scalar('grad_pen', grad_pen, args.current_iteration)
        if args.gan_type in ['dcgan']:
            d_real_acc = torch.ge(d_real.squeeze(), 0.5).float()
            d_fake_acc = torch.le(d_fake.squeeze(), 0.5).float()
            d_total_acc = torch.mean(torch.cat((d_real_acc, d_fake_acc), 0))
            writer.add_scalar('d_total_acc', d_total_acc,
                              args.current_iteration)

        writer.add_scalar('d_real_loss', d_real_loss, args.current_iteration)
        writer.add_scalar('d_fake_loss', d_fake_loss, args.current_iteration)
        writer.add_scalar('d_total_loss', d_loss, args.current_iteration)

        if not (args.gan_type in ['dcgan']
                and d_total_acc > args.d_high_thresh):
            d_optim.zero_grad()
            d_loss.backward()
            d_optim.step()

        summarize_grad(d_model, writer, args.current_iteration, 'd')
        it += 1

    return d_total_acc, real_labels
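
Example #1's g_train and this d_train appear to come from the same training script; a hypothetical outer loop alternating them could look like the sketch below (the models, optimizers, data loader, writer, args, and the args.epochs field are all assumed to be built elsewhere):

# Hypothetical driver loop; everything referenced is assumed to be
# constructed elsewhere in the same script.
for epoch in range(args.epochs):
    for batch in loader:
        d_acc, real_labels = d_train(d_model, g_model, d_optim,
                                     loss_function, batch, writer, args)
        g_loss = g_train(g_model, d_model, g_optim, loss_function,
                         real_labels, writer, args, args.current_iteration)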
Example #7
    def train(self):
        self.train_hist = {}
        self.train_hist['D_loss'] = []
        self.train_hist['G_loss'] = []
        self.train_hist['E_loss'] = []
        self.train_hist['per_epoch_time'] = []
        self.train_hist['total_time'] = []

        if torch.cuda.is_available():
            self.y_real_, self.y_fake_ = Variable(
                torch.ones(self.batch_size, 1).cuda()), Variable(
                    torch.zeros(self.batch_size, 1).cuda())
        else:
            self.y_real_, self.y_fake_ = Variable(
                torch.ones(self.batch_size,
                           1)), Variable(torch.zeros(self.batch_size, 1))

        self.D.train()
        print('training start!!')
        start_time = time.time()
        for epoch in range(self.epoch):
            # reset training mode of G and E
            self.G.train()
            self.E.train()
            epoch_start_time = time.time()
            for iter, (X, _) in enumerate(self.train_loader):
                X = utils.to_var(X)
                """Discriminator"""
                z = utils.generate_z(self.batch_size, self.z_dim, self.prior)
                x_noise = utils.generate_z(self.batch_size, self.z_dim,
                                           self.prior)
                X_hat = self.G(z)
                z_hat = self.E(X, x_noise)
                D_real = self.D(X, z_hat)
                D_fake = self.D(X_hat, z)
                D_loss = self.BCE_loss(D_real, self.y_real_) + self.BCE_loss(
                    D_fake, self.y_fake_)
                self.train_hist['D_loss'].append(D_loss.item())

                # Optimize
                D_loss.backward()
                self.D_optimizer.step()  # update D
                self.__reset_grad()
                """Generator and Encoder"""
                # Use both Discriminator and Encoder to update Generator
                z = utils.generate_z(self.batch_size, self.z_dim, self.prior)
                x_noise = utils.generate_z(self.batch_size, self.z_dim,
                                           self.prior)
                X_hat = self.G(z)
                D_fake = self.D(X_hat, z)
                z_hat = self.E(X_hat, x_noise)
                E_loss = torch.mean(torch.sum((z - z_hat)**2, 1))
                self.train_hist['E_loss'].append(E_loss.item())
                G_loss = self.BCE_loss(D_fake, self.y_real_)
                total_loss = G_loss + E_loss
                self.train_hist['G_loss'].append(G_loss.item())

                # Optimize
                total_loss.backward()
                self.E_optimizer.step()  # update E
                self.G_optimizer.step()  # update G
                self.__reset_grad()
                """Plot"""
                if (iter + 1) == len(self.train_loader.dataset) // self.batch_size:
                    print(
                        'Epoch-{}; D_loss: {:.4}; G_loss: {:.4}; E_loss: {:.4}\n'
                        .format(epoch, D_loss.item(), G_loss.item(),
                                E_loss.item()))

                    self.visualize_results(epoch + 1)

                    break

            self.train_hist['per_epoch_time'].append(time.time() -
                                                     epoch_start_time)

            # Save model every 5 epochs
            if epoch % 5 == 0:
                self.save()

        self.train_hist['total_time'].append(time.time() - start_time)
        print("Avg one epoch time: %.2f, total %d epochs time: %.2f" %
              (np.mean(self.train_hist['per_epoch_time']), self.epoch,
               self.train_hist['total_time'][0]))
        print("Training finish!... save training results")
        self.save()
Example #8
    def train(self, config):
        d_optim = tf.train.AdamOptimizer(config.learning_rate,
                                         beta1=config.beta1).minimize(
                                             self.d_loss, var_list=self.d_vars)
        g_optim = tf.train.AdamOptimizer(config.learning_rate,
                                         beta1=config.beta1).minimize(
                                             self.g_loss, var_list=self.g_vars)
        tf.global_variables_initializer().run()

        self.g_sum = summarys['merge']([
            self.z_sum, self.d__sum, self.G_sum, self.d_loss_fake_sum,
            self.g_loss_sum
        ])
        self.d_sum = summarys['merge'](
            [self.z_sum, self.d_sum, self.d_loss_real_sum, self.d_loss_sum])
        self.writer = summarys['writer']('.logs', self.sess.graph)

        counter = 1
        start_time = time.time()
        could_load, checkpoint_counter = self.load(self.checkpoint_dir)
        if could_load:
            counter = checkpoint_counter
            print("[*] load success")
        else:
            print("[!] load failed !!")

        for epoch in tqdm(range(config.epoch)):
            batch_idxs = min(len(self.data_x),
                             config.train_size) // config.batch_size
            for idx in range(0, batch_idxs):
                batch_images = self.data_x[idx * config.batch_size:(idx + 1) *
                                           config.batch_size]
                batch_labels = self.data_y[idx * config.batch_size:(idx + 1) *
                                           config.batch_size]
                batch_z = generate_z(self.batch_size, self.z_dim * 4)

                # update D network
                _, summary_str = self.sess.run(
                    [d_optim, self.d_sum],
                    feed_dict={
                        self.x_set[0]:
                        batch_images[:, 0:int(self.input_height /
                                              np.sqrt(self.num_patches)),
                                     0:int(self.input_width /
                                           np.sqrt(self.num_patches)), :],
                        self.x_set[1]:
                        batch_images[:, 0:int(self.input_height /
                                              np.sqrt(self.num_patches)),
                                     0:int(self.input_width /
                                           np.sqrt(self.num_patches)), :],
                        self.x_set[2]:
                        batch_images[:, 0:int(self.input_height /
                                              np.sqrt(self.num_patches)),
                                     0:int(self.input_width /
                                           np.sqrt(self.num_patches)), :],
                        self.x_set[3]:
                        batch_images[:, 0:int(self.input_height /
                                              np.sqrt(self.num_patches)),
                                     0:int(self.input_width /
                                           np.sqrt(self.num_patches)), :],
                        self.z_set[0]:
                        batch_z[:, 0:self.z_dim],
                        self.z_set[1]:
                        batch_z[:, self.z_dim:self.z_dim * 2],
                        self.z_set[2]:
                        batch_z[:, self.z_dim * 2:self.z_dim * 3],
                        self.z_set[3]:
                        batch_z[:, self.z_dim * 3:self.z_dim * 4],
                        self.y_set[0]:
                        np.concatenate([
                            batch_labels,
                            np.array([[0, 0]] * self.batch_size).astype(
                                np.float32)
                        ],
                                       axis=1),
                        self.y_set[1]:
                        np.concatenate([
                            batch_labels,
                            np.array([[0, 1]] * self.batch_size).astype(
                                np.float32)
                        ],
                                       axis=1),
                        self.y_set[2]:
                        np.concatenate([
                            batch_labels,
                            np.array([[1, 0]] * self.batch_size).astype(
                                np.float32)
                        ],
                                       axis=1),
                        self.y_set[3]:
                        np.concatenate([
                            batch_labels,
                            np.array([[1, 1]] * self.batch_size).astype(
                                np.float32)
                        ],
                                       axis=1)
                    })
                self.writer.add_summary(summary_str, counter)

                for i in range(config.g_epoch):
                    # update G network g_epoch times
                    _, summary_str = self.sess.run(
                        [g_optim, self.g_sum],
                        feed_dict={
                            self.z_set[0]:
                            batch_z[:, 0:self.z_dim],
                            self.z_set[1]:
                            batch_z[:, self.z_dim:self.z_dim * 2],
                            self.z_set[2]:
                            batch_z[:, self.z_dim * 2:self.z_dim * 3],
                            self.z_set[3]:
                            batch_z[:, self.z_dim * 3:self.z_dim * 4],
                            self.y_set[0]:
                            np.concatenate([
                                batch_labels,
                                np.array([[0, 0]] * self.batch_size).astype(
                                    np.float32)
                            ],
                                           axis=1),
                            self.y_set[1]:
                            np.concatenate([
                                batch_labels,
                                np.array([[0, 1]] * self.batch_size).astype(
                                    np.float32)
                            ],
                                           axis=1),
                            self.y_set[2]:
                            np.concatenate([
                                batch_labels,
                                np.array([[1, 0]] * self.batch_size).astype(
                                    np.float32)
                            ],
                                           axis=1),
                            self.y_set[3]:
                            np.concatenate([
                                batch_labels,
                                np.array([[1, 1]] * self.batch_size).astype(
                                    np.float32)
                            ],
                                           axis=1)
                        })
                    self.writer.add_summary(summary_str, counter)

                errD_fake = self.d_loss_fake.eval({
                    self.z_set[0]:
                    batch_z[:, 0:self.z_dim],
                    self.z_set[1]:
                    batch_z[:, self.z_dim:self.z_dim * 2],
                    self.z_set[2]:
                    batch_z[:, self.z_dim * 2:self.z_dim * 3],
                    self.z_set[3]:
                    batch_z[:, self.z_dim * 3:self.z_dim * 4],
                    self.y_set[0]:
                    np.concatenate([
                        batch_labels,
                        np.array([[0, 0]] * self.batch_size).astype(np.float32)
                    ],
                                   axis=1),
                    self.y_set[1]:
                    np.concatenate([
                        batch_labels,
                        np.array([[0, 1]] * self.batch_size).astype(np.float32)
                    ],
                                   axis=1),
                    self.y_set[2]:
                    np.concatenate([
                        batch_labels,
                        np.array([[1, 0]] * self.batch_size).astype(np.float32)
                    ],
                                   axis=1),
                    self.y_set[3]:
                    np.concatenate([
                        batch_labels,
                        np.array([[1, 1]] * self.batch_size).astype(np.float32)
                    ],
                                   axis=1)
                })
                errD_real = self.d_loss_real.eval({
                    self.x_set[0]:
                    batch_images[:, 0:int(self.input_height /
                                          np.sqrt(self.num_patches)),
                                 0:int(self.input_width /
                                       np.sqrt(self.num_patches)), :],
                    self.x_set[1]:
                    batch_images[:, 0:int(self.input_height /
                                          np.sqrt(self.num_patches)),
                                 0:int(self.input_width /
                                       np.sqrt(self.num_patches)), :],
                    self.x_set[2]:
                    batch_images[:, 0:int(self.input_height /
                                          np.sqrt(self.num_patches)),
                                 0:int(self.input_width /
                                       np.sqrt(self.num_patches)), :],
                    self.x_set[3]:
                    batch_images[:, 0:int(self.input_height /
                                          np.sqrt(self.num_patches)),
                                 0:int(self.input_width /
                                       np.sqrt(self.num_patches)), :],
                    self.y_set[0]:
                    np.concatenate([
                        batch_labels,
                        np.array([[0, 0]] * self.batch_size).astype(np.float32)
                    ],
                                   axis=1),
                    self.y_set[1]:
                    np.concatenate([
                        batch_labels,
                        np.array([[0, 1]] * self.batch_size).astype(np.float32)
                    ],
                                   axis=1),
                    self.y_set[2]:
                    np.concatenate([
                        batch_labels,
                        np.array([[1, 0]] * self.batch_size).astype(np.float32)
                    ],
                                   axis=1),
                    self.y_set[3]:
                    np.concatenate([
                        batch_labels,
                        np.array([[1, 1]] * self.batch_size).astype(np.float32)
                    ],
                                   axis=1)
                })
                errG = self.g_loss.eval({
                    self.z_set[0]:
                    batch_z[:, 0:self.z_dim],
                    self.z_set[1]:
                    batch_z[:, self.z_dim:self.z_dim * 2],
                    self.z_set[2]:
                    batch_z[:, self.z_dim * 2:self.z_dim * 3],
                    self.z_set[3]:
                    batch_z[:, self.z_dim * 3:self.z_dim * 4],
                    self.y_set[0]:
                    np.concatenate([
                        batch_labels,
                        np.array([[0, 0]] * self.batch_size).astype(np.float32)
                    ],
                                   axis=1),
                    self.y_set[1]:
                    np.concatenate([
                        batch_labels,
                        np.array([[0, 1]] * self.batch_size).astype(np.float32)
                    ],
                                   axis=1),
                    self.y_set[2]:
                    np.concatenate([
                        batch_labels,
                        np.array([[1, 0]] * self.batch_size).astype(np.float32)
                    ],
                                   axis=1),
                    self.y_set[3]:
                    np.concatenate([
                        batch_labels,
                        np.array([[1, 1]] * self.batch_size).astype(np.float32)
                    ],
                                   axis=1)
                })

                counter += 1
                if np.mod(counter, 100) == 1:
                    samples, d_loss, g_loss = self.sess.run(
                        [self.sampler, self.d_loss, self.g_loss],
                        feed_dict={
                            self.x_set[0]:
                            batch_images[:, 0:int(self.input_height /
                                                  np.sqrt(self.num_patches)),
                                         0:int(self.input_width /
                                               np.sqrt(self.num_patches)), :],
                            self.x_set[1]:
                            batch_images[:, 0:int(self.input_height /
                                                  np.sqrt(self.num_patches)),
                                         0:int(self.input_width /
                                               np.sqrt(self.num_patches)), :],
                            self.x_set[2]:
                            batch_images[:, 0:int(self.input_height /
                                                  np.sqrt(self.num_patches)),
                                         0:int(self.input_width /
                                               np.sqrt(self.num_patches)), :],
                            self.x_set[3]:
                            batch_images[:, 0:int(self.input_height /
                                                  np.sqrt(self.num_patches)),
                                         0:int(self.input_width /
                                               np.sqrt(self.num_patches)), :],
                            self.z_set[0]:
                            batch_z[:, 0:self.z_dim],
                            self.z_set[1]:
                            batch_z[:, self.z_dim:self.z_dim * 2],
                            self.z_set[2]:
                            batch_z[:, self.z_dim * 2:self.z_dim * 3],
                            self.z_set[3]:
                            batch_z[:, self.z_dim * 3:self.z_dim * 4],
                            self.y_set[0]:
                            np.concatenate([
                                batch_labels,
                                np.array([[0, 0]] * self.batch_size).astype(
                                    np.float32)
                            ],
                                           axis=1),
                            self.y_set[1]:
                            np.concatenate([
                                batch_labels,
                                np.array([[0, 1]] * self.batch_size).astype(
                                    np.float32)
                            ],
                                           axis=1),
                            self.y_set[2]:
                            np.concatenate([
                                batch_labels,
                                np.array([[1, 0]] * self.batch_size).astype(
                                    np.float32)
                            ],
                                           axis=1),
                            self.y_set[3]:
                            np.concatenate([
                                batch_labels,
                                np.array([[1, 1]] * self.batch_size).astype(
                                    np.float32)
                            ],
                                           axis=1)
                        })
                    save_images(
                        samples, image_manifold_size(samples.shape[0]),
                        './{}/train_{:02d}_{:04d}.png'.format(
                            config.sample_dir, epoch, idx))

                if np.mod(counter, 500) == 2:
                    self.save(config.checkpoint_dir, counter)

                print("Epoch: [%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f" \
                      % (epoch, idx, batch_idxs,
                         time.time() - start_time, errD_fake + errD_real, errG))
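
The same feed_dict is assembled four times above (D update, G update, loss evaluation and sampling), with the four y_set entries differing only in the appended corner code [0,0]/[0,1]/[1,0]/[1,1]. A small helper could centralize that construction; a sketch under the shapes used above (note the original feeds the identical image crop to all four x_set slots, which the sketch reproduces):

import numpy as np

def build_feed(z_set, y_set, batch_z, batch_labels, z_dim,
               x_set=None, patch=None):
    """Build the feed_dict used above without the four-way repetition."""
    corner_codes = [[0, 0], [0, 1], [1, 0], [1, 1]]
    feed = {}
    for i, code in enumerate(corner_codes):
        feed[z_set[i]] = batch_z[:, i * z_dim:(i + 1) * z_dim]
        onehot = np.array([code] * len(batch_labels), dtype=np.float32)
        feed[y_set[i]] = np.concatenate([batch_labels, onehot], axis=1)
        if x_set is not None:
            feed[x_set[i]] = patch  # original uses the same crop for all four
    return feed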
Example #9
from flask import Flask, jsonify, request, send_file
from flask_compress import Compress
from torch import Tensor
from generate import SuperGenerator
from utils import generate_z, create_coords_from_voxels, generate_binvox_file, calculate_camera
from evolution import mutate, crossover, simple_evolution, behavioral_novelty_search, novelty_search


# Constants
COMPRESS = Compress()
BEHAVIORAL = False
APP = Flask(__name__)
G = SuperGenerator()
CAMERA_PLANE = calculate_camera(
    [G.generate(generate_z(), 'Plane') for i in range(100)])
CAMERA_CHAIR = calculate_camera(
    [G.generate(generate_z(), 'Chair') for i in range(100)])


@APP.after_request
def add_cors(response):
    '''
    Add CORS header(s) to every response from valid sites

    Allow:
    - Origin from everywhere
    - Content-Type to be shown in headers
    - GET, POST and OPTIONS methods
    '''
    r = request.referrer[:-1]
    if r in ['http://localhost:8080', 'localhost:8080', 'https://localhost:8080',