def transform(paths, out_path, fnc, subfolders=True, arg=None):

    name = fnc.__name__

    if arg is not None:
        name += "_" + str(arg)

    result = []

    for image_path in paths:
        image = pImage.open(image_path)
        print('"%s" => %s' % (image_path, name))

        # Pass the optional argument through to the transform when provided.
        if arg is not None:
            transformed = fnc(image, arg)
        else:
            transformed = fnc(image)

        if subfolders:
            in_path = path.dirname(path.dirname(image_path))
        else:
            in_path = path.dirname(image_path)

        transformed_path = add_suffix(in_path, out_path, image_path,
                                      '_' + name)

        if subfolders:
            makedirs(path.dirname(transformed_path), exist_ok=True)

        transformed.save(transformed_path)

        result.append(transformed_path)

    return result
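
The add_suffix helper called above is not part of this snippet. As a rough sketch only, a path-based helper with the assumed signature add_suffix(in_path, out_path, image_path, suffix) could look like the following; the project's real helper may differ.

from os import path

def add_suffix(in_path, out_path, image_path, suffix):
    # Hypothetical sketch: mirror image_path under out_path and append
    # `suffix` to the file name while keeping the original extension.
    relative = path.relpath(image_path, in_path)
    root, ext = path.splitext(relative)
    return path.join(out_path, root + suffix + ext)
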
Example #2
    def _build_rnn_graph(self, inputs, is_training):
        """Build the inference graph using canonical LSTM cells."""
        config = self._config

        def make_cell():
            cell = self._get_lstm_cell(config, is_training)
            if is_training and config.keep_prob < 1:
                cell = tf.contrib.rnn.DropoutWrapper(
                    cell, output_keep_prob=config.keep_prob)
            return cell

        cell = tf.contrib.rnn.MultiRNNCell(
            [make_cell() for _ in range(config.num_layers)],
            state_is_tuple=True)

        self._initial_state = cell.zero_state(config.batch_size, tf.float32)
        state = self._initial_state

        # Before unstack, inputs shape is [batch_size, num_steps, embedding_size]
        rnn_scope = "RNN"
        inputs = tf.unstack(inputs, num=self.num_steps, axis=1)
        outputs, state = tf.nn.static_rnn(cell,
                                          inputs,
                                          initial_state=self._initial_state,
                                          scope=rnn_scope)

        rnn_full_scope = utils.add_suffix(rnn_scope,
                                          tf.get_variable_scope().name)
        rnn_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                     scope=rnn_full_scope)

        output = tf.reshape(tf.concat(outputs, 1), [-1, config.hidden_size])
        return output, state, rnn_vars
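
The utils.add_suffix call above joins the local "RNN" scope with the enclosing variable scope so that the trainable-variable lookup matches the full scope path. Despite its name, it appears to act as a prefix join; a minimal sketch under that assumption (not the project's actual utils module):

def add_suffix(name, prefix):
    # Hypothetical sketch: e.g. add_suffix("RNN", "Model") -> "Model/RNN";
    # with an empty enclosing scope, return the name unchanged.
    return "%s/%s" % (prefix, name) if prefix else name
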
Example #3
    def evaluate_model(self, data, reuse=None, training=True, suffix=None):

        # Unpack features from data
        images, labels = data

        # Classify input images
        probs, logits = self.classifier(images,
                                        training=training,
                                        reuse=reuse,
                                        name=add_suffix("classifier", suffix))

        # Compute softmax cross entropy loss
        loss = self.compute_cross_entropy(logits,
                                          labels,
                                          name=add_suffix("loss", suffix))

        return images, labels, probs, logits, loss
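
Here add_suffix attaches an optional suffix to TensorFlow op names, e.g. to tell training and validation graphs apart; the same variant appears in several of the later examples. A minimal string-based sketch, assuming the suffix may be None:

def add_suffix(name, suffix):
    # Hypothetical sketch: append "_<suffix>" only when a suffix is given,
    # e.g. add_suffix("loss", "train") -> "loss_train".
    return name if suffix is None else name + "_" + str(suffix)
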
Example #4
    def evaluate_model(self, z, data, reuse=None, training=True, suffix=None):

        # Generate predicted images from noisy latent vector z
        pred = self.generator(z,
                              training=training,
                              reuse=reuse,
                              name=add_suffix("generator", suffix))

        # Compute discriminator probabilities/logits for fake images
        D_fake, D_fake_logits = self.discriminator(pred,
                                                   training=training,
                                                   reuse=reuse,
                                                   name=add_suffix(
                                                       "D_fake", suffix))

        # Compute generator loss
        g_loss = self.compute_cross_entropy(D_fake_logits,
                                            tf.ones_like(D_fake),
                                            name=add_suffix("g_loss", suffix))

        # Compute discriminator loss for identifying fake images
        d_loss_fake = self.compute_cross_entropy(D_fake_logits,
                                                 tf.zeros_like(D_fake))

        # Compute discriminator probabilities/logits for real images
        D_real, D_real_logits = self.discriminator(data,
                                                   training=training,
                                                   reuse=True,
                                                   name=add_suffix(
                                                       "D_real", suffix))

        # Compute discriminator loss for identifying real images
        d_loss_real = self.compute_cross_entropy(D_real_logits,
                                                 tf.ones_like(D_real))

        # Compute discriminator loss
        d_loss = tf.add(d_loss_real,
                        d_loss_fake,
                        name=add_suffix("d_loss", suffix))

        return pred, d_loss, g_loss
Example #5
def modify_configuration():
    """ 
    Rewrites the Python statements in config.txt so that their execution results
    in the proper recording format.

    """
    buffer = ask_for("Enter a buffer size (samples): ", int, BUFFER_SIZE)
    time = ask_for("Enter a file length (seconds): ", float, RECORD_SECONDS)
    channels = ask_for("Enter number of channels (1 for mono, 2 for stereo): ", int, CHANNELS)

    devices()
    src = between(ask_for("Enter the index of the recording source: ", int, SOURCE), 0, p.get_device_count() - 1)
    samplesize = ask_for("Enter a sample rate (44100, 48000, 96000...): ", int, RATE)

    # Quote nesting to prevent Python from directly evaluating the response to the prompt.
    wav_filename = '"{0}"'.format(add_suffix(ask_for("Enter .wav filename of output: ", str), ".wav"))
    print("Audio output will be saved to " + wav_filename)

    txt_filename = '"{0}"'.format(add_suffix(ask_for("Enter .txt filename of output: ", str), ".txt"))
    print("Text output will be saved to " + txt_filename)

    iterations = between(ask_for("Enter number of iterations (leave blank for infinity): ", int, 2147483647), 1, 2147483647)

    output = bool(txt_filename)

    config_text = (
        "BUFFER_SIZE = {0}\n"
        "RECORD_SECONDS = {1}\n"
        "FORMAT = pyaudio.paInt16\n"
        "CHANNELS = {2}\n"
        "SOURCE = {3}\n"
        "RATE = {4}\n"
        "OUTPUT = {5}\n"
        "TXT_OUTPUT_FILENAME = {6}\n"
        "WAVE_OUTPUT_FILENAME = {7}\n"
        "ITERATIONS = {8}"
    ).format(buffer, time, channels, src, samplesize, output,
             txt_filename, wav_filename, iterations)

    # Write the configuration and close the file automatically.
    with open("config.txt", "w") as config:
        config.write(config_text)
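
In this example add_suffix guarantees a file extension on the user-supplied names, and ask_for is assumed to prompt with a type and a default. A sketch of the extension-appending variant, under that assumption:

def add_suffix(filename, extension):
    # Hypothetical sketch: append the extension unless it is already there,
    # e.g. add_suffix("take1", ".wav") -> "take1.wav".
    return filename if filename.endswith(extension) else filename + extension
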
Example #6
def transform(outpath, paths, fnc):

    result = []

    for image_path in paths:
        image = pImage.open(image_path)
        print('"%s" => %s' % (image_path, fnc.__name__))
        transformed = fnc(image)

        in_path = path.dirname(path.dirname(image_path))

        transformed_path = add_suffix(in_path, outpath, image_path,
                                      '_' + fnc.__name__)
        makedirs(path.dirname(transformed_path), exist_ok=True)
        transformed.save(transformed_path)

        result.append(transformed_path)

    return result
Example #7
def evaluate_model(self, data, reuse=None, training=True, suffix=None):

    # Encode input images
    z = self.encoder(self,
                     data,
                     training=training,
                     reuse=reuse,
                     name=add_suffix("encoder", suffix))

    # Sample in latent spaces
    if self.use_kl:
        h1, h2, h3, h4, h5, h6 = z
        m, log_s = tf.split(h6, num_or_size_splits=2, axis=3)
        h6 = self.sampleGaussian(m, log_s, name='latent_sample')
        h6 = tf.layers.dropout(h6, rate=self.dropout_rate, training=training)
        z = [h1, h2, h3, h4, h5, h6]
        #if self.reduce_noise:
        #    # Use KL divergence w.r.t. N(0, 0.1*I)
        #    # by comparing with 10*sigma ~ log(10*sigma) ~ log(10) + log(sigma)
        #    kl_loss = self.kl_wt*tf.reduce_sum([self.compute_kl_loss(m,tf.add(10.0*tf.ones_like(log_s),log_s))])
        #else:
        #    kl_loss = self.kl_wt*tf.reduce_sum([self.compute_kl_loss(m,log_s)])
        kl_loss = self.kl_wt * tf.reduce_sum([self.compute_kl_loss(m, log_s)])
    else:
        h1, h2, h3, h4, h5, h6 = z
        h6 = tf.nn.leaky_relu(h6)
        z = [h1, h2, h3, h4, h5, h6]
        # Compute Kullback–Leibler (KL) divergence
        kl_loss = self.kl_wt

    # Decode latent vector back to original image
    pred = self.decoder(self,
                        z,
                        training=training,
                        reuse=reuse,
                        name=add_suffix("pred", suffix))

    # Compute marginal likelihood loss
    masked_soln, masked_pred, masked_scale, interior_loss, boundary_loss, prob_loss = self.compute_ms_loss(
        data, pred, name=add_suffix("ms_loss", suffix))

    # Assign names to outputs
    masked_soln = tf.identity(masked_soln,
                              name=add_suffix('masked_soln', suffix))
    masked_pred = tf.identity(masked_pred,
                              name=add_suffix('masked_pred', suffix))
    masked_scale = tf.identity(masked_scale,
                               name=add_suffix('masked_scale', suffix))

    return masked_soln, masked_pred, masked_scale, interior_loss, boundary_loss, kl_loss, prob_loss
Example #8
    def evaluate_model(self, data, reuse=None, training=True, suffix=None):
        
        # Encode input images
        mean, log_sigma = self.encoder(data, training=training, reuse=reuse, name=add_suffix("encoder", suffix))

        # Sample latent vector
        z_sample = self.sampleGaussian(mean, log_sigma, name=add_suffix("latent_vector", suffix))

        # Decode latent vector back to original image
        pred = self.decoder(z_sample, training=training, reuse=reuse, name=add_suffix("pred", suffix))

        # Compute marginal likelihood loss
        ml_loss = self.compute_ml_loss(data, pred, name=add_suffix("ml_loss", suffix))
        
        # Compute Kullback–Leibler (KL) divergence
        kl_loss = self.compute_kl_loss(mean, log_sigma, name=add_suffix("kl_loss", suffix))
                
        # Define loss according to the evidence lower bound objective (ELBO)
        loss = tf.add(ml_loss, kl_loss, name=add_suffix("loss", suffix))

        return pred, loss, ml_loss, kl_loss
Example #9
def resize(image_in, size):   
    image_out = utils.add_suffix(image_in, 'resized')
    os.system("convert " + image_in + " -resize " + str(size) + "@ " + image_out)
    return image_out
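
The utils.add_suffix call above apparently inserts a marker into the file name before the extension and hands the result to ImageMagick's convert. A hedged sketch of such a helper and a usage note, assuming that behaviour:

import os

def add_suffix(filename, suffix):
    # Hypothetical sketch: "photo.png" + "resized" -> "photo_resized.png".
    root, ext = os.path.splitext(filename)
    return root + "_" + suffix + ext

# Example usage (assumes ImageMagick's `convert` is on PATH); the "@" flag
# in the original resize() asks convert for roughly `size` total pixels:
# resize("photo.png", 250000)  ->  writes "photo_resized.png"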