Example #1
import os

import numpy as np
from keras.models import load_model   # assuming Keras, given the .h5 model file
from noise import pnoise1             # 1-D Perlin noise

from libs import gif                  # project-local helper providing build_gif


def make_gif():
    # Load the trained decoder; without it there is nothing to sample from.
    if not os.path.exists('latent_best.h5'):
        raise FileNotFoundError('latent_best.h5 not found')
    generator = load_model('latent_best.h5')

    imgs = []
    location = [0.0, 0.0]
    velocity = [0.0, 0.0]
    acceleration = [0.0, 0.0]
    noise = np.random.rand(2) * 10
    for _ in range(150):
        # Advance each noise offset and use Perlin noise as a smooth acceleration.
        noise[0] += 0.05
        noise[1] += 0.05
        acceleration[0] = pnoise1(noise[0])
        acceleration[1] = pnoise1(noise[1])
        velocity[0] += acceleration[0]
        velocity[1] += acceleration[1]
        velocity = np.clip(velocity, -50, 50)
        location[0] += velocity[0]
        location[1] += velocity[1]

        # Bounce back when the walk leaves the latent region we want to explore.
        if location[0] < -100 or location[0] > 100:
            velocity[0] *= -1
        if location[1] < -100 or location[1] > 100:
            velocity[1] *= -1

        print(location[0], location[1])
        # Decode the current 2-D latent coordinate into an image frame.
        z_sample = np.array([[location[0], location[1]]])
        x_decoded = generator.predict(z_sample)
        img = x_decoded[0].reshape(100, 100, 3)
        imgs.append(img)

    gif.build_gif(imgs, saveto='clout.gif')
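
The walk above stays smooth because pnoise1 returns values that vary continuously with its input. A minimal sketch (assuming only the noise package, which provides pnoise1) to see those values in isolation:

from noise import pnoise1
import numpy as np

# Sample 1-D Perlin noise along a slowly increasing offset, exactly as the
# loop above does with noise[0] and noise[1].
offsets = np.arange(0, 5, 0.05)
samples = [pnoise1(float(t)) for t in offsets]
print(min(samples), max(samples))  # values stay within roughly (-1, 1)
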
Example #2
    def train(self, input_vects):
        """
        Trains the SOM.
        'input_vects' should be an iterable of 1-D NumPy arrays with
        dimensionality as provided during initialization of this SOM.
        Current weightage vectors for all neurons (initially random) are
        taken as starting conditions for training.
        """

        # Training iterations
        for iter_no in range(self._n_iterations):
            print(iter_no)
            # Plot the current state of the map after each completed iteration.
            if iter_no > 0:
                self.map_plot(iter_no)
            centroid_grid = [[] for i in range(self._m)]
            self._weightages = list(self._sess.run(self._weightage_vects))
            self._locations = list(self._sess.run(self._location_vects))

            for i, loc in enumerate(self._locations):
                centroid_grid[loc[0]].append(self._weightages[i])
            self._centroid_grid = centroid_grid

            # Train with each vector one by one
            for input_vect in input_vects:
                self._sess.run(self._training_op,
                               feed_dict={
                                   self._vect_input: input_vect,
                                   self._iter_input: iter_no
                               })
        print(iter_no)
        self.map_plot(iter_no)
        self._trained = True
        # `imgs` is assumed to be populated elsewhere in the source class
        # (e.g. by map_plot) with the frames collected during training.
        gif.build_gif(imgs, saveto='exoplaneta005s6.gif')
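
The centroid-grid bookkeeping above groups each neuron's weight vector by the row of its grid location. A minimal standalone sketch of the same grouping, using plain NumPy values in place of the TensorFlow session fetches (all sizes here are made up):

import numpy as np

m, n, dim = 3, 4, 5                                # hypothetical grid and input sizes
locations = [(i, j) for i in range(m) for j in range(n)]
weightages = [np.random.rand(dim) for _ in locations]

centroid_grid = [[] for _ in range(m)]
for (row, _col), w in zip(locations, weightages):
    centroid_grid[row].append(w)                   # one list of weight vectors per row
print(len(centroid_grid), len(centroid_grid[0]))   # -> 3 4
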
Example #3
# The excerpt below assumes the enclosing epoch loop reconstructed here;
# `xs`, `ys`, `img`, `cost`, `optimizer`, `batch_size`, `gif_step`, and
# `n_iterations` are defined earlier in the source script.
for it_i in range(n_iterations):
    # Shuffle the example indices once per epoch.
    idxs = np.random.permutation(range(len(xs)))
    n_batches = len(idxs) // batch_size
    for batch_i in range(n_batches):

        # Get just minibatch amount of data
        idxs_i = idxs[batch_i * batch_size:(batch_i + 1) * batch_size]

        # And optimize, also returning the cost so we can monitor
        # how our optimization is doing.
        training_cost = sess.run([cost, optimizer],
                                 feed_dict={
                                     X: xs[idxs_i],
                                     Y: ys[idxs_i]
                                 })[0]

    # Also, every 20 iterations, we'll draw the prediction of our
    # input xs, which should try to recreate our image!
    if (it_i + 1) % gif_step == 0:
        costs.append(training_cost / n_batches)
        ys_pred = Y_pred.eval(feed_dict={X: xs}, session=sess)
        img = np.clip(ys_pred.reshape(img.shape), 0, 1)
        imgs.append(img)
        # Plot the cost over time
        fig, ax = plt.subplots(1, 2)
        ax[0].plot(costs)
        ax[0].set_xlabel('Iteration')
        ax[0].set_ylabel('Cost')
        ax[1].imshow(img)
        fig.suptitle('Iteration {}'.format(it_i))
        plt.show()

_ = gif.build_gif(imgs, saveto='single.gif', show_gif=False)
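
The minibatch loop in this example relies on slicing a shuffled index array into contiguous chunks. A minimal sketch of just that indexing (sizes are arbitrary):

import numpy as np

n_examples, batch_size = 1000, 100            # arbitrary sizes
idxs = np.random.permutation(n_examples)      # shuffle once per epoch
n_batches = n_examples // batch_size
for batch_i in range(n_batches):
    idxs_i = idxs[batch_i * batch_size:(batch_i + 1) * batch_size]
    assert len(idxs_i) == batch_size          # every batch is full-sized
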
Example #4
# Tail of the train() function from this example's source script:
            # Also, every 20 iterations, we'll draw the prediction of our
            # input xs, which should try to recreate our image!
            if (it_i + 1) % gif_step == 0:
                costs.append(training_cost / n_batches)
                ys_pred = model['Y_pred'].eval(feed_dict={model['X']: xs},
                                               session=sess)
                img = ys_pred.reshape(imgs.shape)
                gifs.append(img)
        return gifs


# Imports assumed from the CADL course helper libraries used by this excerpt.
import numpy as np
import matplotlib.pyplot as plt
from libs import gif, utils

celeb_imgs = utils.get_celeb_imgs()
plt.figure(figsize=(10, 10))
plt.imshow(utils.montage(celeb_imgs).astype(np.uint8))
# It doesn't have to be 100 images, explore!
imgs = np.array(celeb_imgs).copy()
gifs = train(imgs=imgs)

montage_gifs = [
    np.clip(utils.montage((m * 127.5) + 127.5), 0, 255).astype(np.uint8)
    for m in gifs
]
_ = gif.build_gif(montage_gifs, saveto='multiple.gif')

final = gifs[-1]
final_gif = [
    np.clip(((m * 127.5) + 127.5), 0, 255).astype(np.uint8) for m in final
]
gif.build_gif(final_gif, saveto='final.gif')
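
The (m * 127.5) + 127.5 transform above maps network outputs in [-1, 1] back to displayable uint8 pixels in [0, 255]. A minimal sketch of that rescaling on a stand-in output:

import numpy as np

m = np.random.uniform(-1, 1, size=(64, 64, 3))             # stand-in network output
pixels = np.clip((m * 127.5) + 127.5, 0, 255).astype(np.uint8)
print(pixels.dtype, pixels.min(), pixels.max())            # uint8, within [0, 255]
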
Example #5
# Imports assumed for this excerpt (CADL-style helper libraries); make_4d
# and deprocess are helpers defined elsewhere in the source module.
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from libs.vgg16 import get_vgg_model
from libs.gif import build_gif


def stylize(content_img, style_img, base_img=None, saveto=None, gif_step=5,
            n_iterations=300, style_weight=0.8, content_weight=0.6):
	"""Stylization with the given content and style images.

    Follows the approach in Leon Gatys et al.

    Parameters
    ----------
    content_img : np.ndarray
        Image to use for finding the content features.
    style_img : np.ndarray
        Image to use for finding the style features.
    base_img : np.ndarray, optional
        Image to use for the base content.  Can be noise or an existing image.
        If None, the content image will be used.
    saveto : str, optional
        Name of GIF image to write to, e.g. "stylization.gif"
    gif_step : int, optional
        Modulo of iterations to save the current stylization.
    n_iterations : int, optional
        Number of iterations to run for.
    style_weight : float, optional
        Weighting on the style features.
    content_weight : float, optional
        Weighting on the content features.

    Returns
    -------
    stylization : np.ndarray
        Final iteration of the stylization.
    """
	# Preprocess both content and style images
	global synth
	content_img = make_4d(content_img)
	style_img = make_4d(style_img)
	if base_img is None:
		base_img = content_img
	else:
		base_img = make_4d(base_img)
	
	# Get Content and Style features
	net = get_vgg_model()
	g = tf.Graph()
	with tf.Session(graph=g) as sess:
		tf.import_graph_def(net['graph_def'], name='vgg')
		names = [op.name for op in g.get_operations()]
		# The first operation in the imported graph is the input placeholder.
		x = g.get_tensor_by_name(names[0] + ':0')
		content_layer = 'vgg/conv5_2/conv5_2:0'
		content_features = g.get_tensor_by_name(
				content_layer).eval(feed_dict={
			x: content_img,
			'vgg/dropout_1/random_uniform:0': [[1.0] * 4096],
			'vgg/dropout/random_uniform:0': [[1.0] * 4096]
		})
		style_layers = ['vgg/conv1_1/conv1_1:0',
		                'vgg/conv2_1/conv2_1:0',
		                # 'vgg/conv3_1/conv3_1:0',
		                # 'vgg/conv4_1/conv4_1:0',
		                'vgg/conv5_1/conv5_1:0']
		style_activations = []
		for style_i in style_layers:
			style_activation_i = g.get_tensor_by_name(style_i).eval(
					feed_dict={
						x: style_img,
						'vgg/dropout_1/random_uniform:0': [[1.0] * 4096],
						'vgg/dropout/random_uniform:0': [[1.0] * 4096]
					})
			style_activations.append(style_activation_i)
		style_features = []
		for style_activation_i in style_activations:
			s_i = np.reshape(style_activation_i,
			                 [-1, style_activation_i.shape[-1]])
			gram_matrix = np.matmul(s_i.T, s_i) / s_i.size
			style_features.append(gram_matrix.astype(np.float32))
	
	# Optimize both
	g = tf.Graph()
	with tf.Session(graph=g) as sess:
		net_input = tf.Variable(base_img)
		tf.import_graph_def(
				net['graph_def'],
				name='vgg',
				input_map={'images:0': net_input})
		
		content_loss = tf.nn.l2_loss((g.get_tensor_by_name(content_layer) -
		                              content_features) /
		                             content_features.size)
		style_loss = np.float32(0.0)
		for style_layer_i, style_gram_i in zip(style_layers, style_features):
			layer_i = g.get_tensor_by_name(style_layer_i)
			layer_shape = layer_i.get_shape().as_list()
			layer_size = layer_shape[1] * layer_shape[2] * layer_shape[3]
			layer_flat = tf.reshape(layer_i, [-1, layer_shape[3]])
			gram_matrix = tf.matmul(
					tf.transpose(layer_flat), layer_flat) / layer_size
			style_loss = tf.add(
					style_loss, tf.nn.l2_loss(
							(gram_matrix - style_gram_i) /
							np.float32(style_gram_i.size)))
		loss = content_weight * content_loss + style_weight * style_loss
		optimizer = tf.train.AdamOptimizer(0.01).minimize(loss)
		
		sess.run(tf.global_variables_initializer())
		imgs = []
		for it_i in range(n_iterations):
			_, this_loss, synth = sess.run(
					[optimizer, loss, net_input],
					feed_dict={
						'vgg/dropout_1/random_uniform:0': np.ones(
								g.get_tensor_by_name(
										'vgg/dropout_1/random_uniform:0'
								).get_shape().as_list()),
						'vgg/dropout/random_uniform:0': np.ones(
								g.get_tensor_by_name(
										'vgg/dropout/random_uniform:0'
								).get_shape().as_list())
					})
			print("iteration %d, loss: %f, range: (%f - %f)" %
			      (it_i, this_loss, np.min(synth), np.max(synth)), end='\r')
			if it_i % 5 == 0:
				m = deprocess(synth[0])
				plt.imshow(m)
				plt.savefig('mixed' + str(it_i) + '.png')
			if it_i % gif_step == 0:
				imgs.append(np.clip(synth[0], 0, 1))
		if saveto is not None:
			build_gif(imgs, saveto=saveto)
	return np.clip(synth[0], 0, 1)
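
The style features in this example are Gram matrices: each convolutional activation map is flattened to (positions, channels) and the normalized channel-by-channel inner products are taken, matching both the NumPy and TensorFlow passages above. A minimal NumPy sketch with a toy activation:

import numpy as np

activation = np.random.rand(1, 56, 56, 256).astype(np.float32)  # toy conv activations
flat = activation.reshape(-1, activation.shape[-1])             # (positions, channels)
gram = flat.T @ flat / flat.size                                # same normalization as above
print(gram.shape)                                               # -> (256, 256)
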