Example #1
# Compute the image histogram
hist_RGB_given_image, bins = np.histogram(img, n_columns_histogram)

path_1 = "%s/arrays/level1_%d_%d" % (images_path, kCentroids_features, cIter_features)

# Load the vectors computed by KMEANS
centroids_codebook_features = np.load('%s/centroids_codebook_features_%d_%d_%s_%s_%s_%s.npy' % (path_1, kCentroids_features, cIter_features, useTamuraCoarseness, useTamuraContrast, useTamuraDirectionality, useGarbor))
vq_codes_obs_features = np.load('%s/vq_codes_obs_features_%d_%d_%s_%s_%s_%s.npy' % (path_1, kCentroids_features, cIter_features, useTamuraCoarseness, useTamuraContrast, useTamuraDirectionality, useGarbor))
standard_deviations_features = np.load('%s/standard_deviations_features_%d_%d_%s_%s_%s_%s.npy' % (path_1, kCentroids_features, cIter_features, useTamuraCoarseness, useTamuraContrast, useTamuraDirectionality, useGarbor))

learning_set_features_image = np.zeros(n_columns_features)
j = 0

if useTamuraCoarseness:
    learning_set_features_image[j] = coarseness(img)
    j += 1

if useTamuraContrast:
    learning_set_features_image[j] = contrast(img)
    j += 1

if useTamuraDirectionality:
    learning_set_features_image[j] = degreeDirect(img, threshold, neigh)
    j += 1

if useGarbor:

    start_i = j
    stop_i = start_i + n_kernels * 2
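    # Hypothetical continuation (the snippet is cut off here): the Gabor filter
    # responses would fill slots start_i..stop_i of the feature vector, e.g.
    # learning_set_features_image[start_i:stop_i] = gabor_features(img, n_kernels)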
    
Example #2
        if os.path.isfile(os.path.join(images_path, 'tamura_coarseness_%d.npy' % (r + 1))):
            tamura_coarseness_v[r] = np.load(os.path.join(images_path, 'tamura_coarseness_%d.npy' % (r + 1)))
        else:
            if img is None:
                img = misc.imread(os.path.join(images_path, 'im%d.jpg' % (r + 1)))

                if useGreyScale:
                    img = rgb2gray(img)

            print('Calculating Tamura COARSENESS for file', os.path.join(images_path, 'im%d.jpg' % (r + 1)))

            try:
                start_time = time.time()
                tamura_coarseness_v[r] = coarseness(img)
                elapsed_time = time.time() - start_time
                print('It took %ds to calculate COARSENESS...' % elapsed_time)
                np.save(os.path.join(images_path, 'tamura_coarseness_%d.npy' % (r + 1)), tamura_coarseness_v[r])
            except Exception:
                failures_coarseness.append(r + 1)

        print(tamura_coarseness_v[r])

if useTamuraContrast:

    if not tamura_contrast_hasBeenCalculated:

        if os.path.isfile(os.path.join(images_path, 'tamura_contrast_%d.npy' % (r + 1))):
            tamura_contrast_v[r] = np.load(os.path.join(images_path, 'tamura_contrast_%d.npy' % (r + 1)))
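
Both branches above follow the same cache-or-compute pattern: look for a saved
.npy result next to the image, otherwise compute the Tamura feature and cache
it. A minimal self-contained sketch of that pattern (the helper name and its
arguments are hypothetical, not part of the original code):

import os
import numpy as np

def cached_feature(cache_path, compute):
    # Load a previously saved .npy value if it exists; otherwise call
    # compute(), save the result for next time, and return it.
    if os.path.isfile(cache_path):
        return np.load(cache_path)
    value = compute()
    np.save(cache_path, value)
    return value

# e.g. tamura_coarseness_v[r] = cached_feature(
#          os.path.join(images_path, 'tamura_coarseness_%d.npy' % (r + 1)),
#          lambda: coarseness(img))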
Example #3

# NOTE: this function comes from a larger TensorFlow 1.x module; names such as
# np, tf, time, functools, vgg, transform, tamura, get_img, STYLE_LAYERS,
# CONTENT_LAYER and _tensor_size are imported or defined at module level.
def optimize(content_targets,
             style_target,
             content_weight,
             style_weight,
             tv_weight,
             fcrs_weight,
             fcon_weight,
             fdir_weight,
             vgg_path,
             epochs=2,
             print_iterations=1000,
             batch_size=4,
             save_path='saver/fns.ckpt',
             slow=False,
             learning_rate=1e-3,
             debug=False):
    if slow:
        batch_size = 1
    mod = len(content_targets) % batch_size
    if mod > 0:
        print("Train set has been trimmed slightly..")
        content_targets = content_targets[:-mod]

    style_features = {}
    batch_shape = (batch_size, 256, 256, 3)
    style_shape = (1, ) + style_target.shape
    print(style_shape)
    prop = [1.0, 2.0, 16.0, 2.0, 1.0]
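    # relative weights for the five style layers; the large middle entry makes
    # the third style layer dominate the style loss computed below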

    # precompute content texture measurements
    if batch_size == 1:
        with tf.Graph().as_default(), tf.Session() as sess:
            content_image = tf.placeholder(tf.float32,
                                           shape=batch_shape,
                                           name='content_image')
            image = np.zeros(batch_shape, dtype=np.float32)
            image[0] = get_img(content_targets[0],
                               (256, 256, 3)).astype(np.float32)
            fcrs = tamura.coarseness(content_image)
            fcon = tamura.contrast(content_image)
            fdir = tamura.directionality(content_image)
            content_fcrs = fcrs.eval(feed_dict={content_image: image})
            content_fcon = fcon.eval(feed_dict={content_image: image})
            content_fdir = fdir.eval(feed_dict={content_image: image})
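            # NOTE: these content texture statistics are computed here but not
            # referenced again in this function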

    # precompute style texture features
    with tf.Graph().as_default(), tf.Session() as sess:
        style_image = tf.placeholder(tf.float32,
                                     shape=style_shape,
                                     name='style_image')
        image = np.array([style_target])
        fcrs = tamura.coarseness(style_image)
        fcon = tamura.contrast(style_image)
        fdir = tamura.directionality(style_image)
        style_fcrs = fcrs.eval(feed_dict={style_image: image})
        style_fcon = fcon.eval(feed_dict={style_image: image})
        style_fdir = fdir.eval(feed_dict={style_image: image})
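        # these style texture scalars become the targets of the Tamura losses
        # applied to the generated images below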

    # precompute style features
    with tf.Graph().as_default(), tf.device('/cpu:0'), tf.Session() as sess:
        style_image = tf.placeholder(tf.float32,
                                     shape=style_shape,
                                     name='style_image')
        style_image_pre = vgg.preprocess(style_image)
        net = vgg.net(vgg_path, style_image_pre)
        style_pre = np.array([style_target])
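        # build one Gram matrix per style layer: channel co-activation
        # statistics with the spatial layout averaged out; these serve as the
        # style targets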
        for layer in STYLE_LAYERS:
            features = net[layer].eval(feed_dict={style_image: style_pre})
            features = np.reshape(features, (-1, features.shape[3]))
            gram = np.matmul(features.T, features) / features.size
            style_features[layer] = gram

    with tf.Graph().as_default(), tf.Session() as sess:
        X_content = tf.placeholder(tf.float32,
                                   shape=batch_shape,
                                   name="X_content")
        X_pre = vgg.preprocess(X_content)

        # precompute content features
        content_features = {}
        content_net = vgg.net(vgg_path, X_pre)
        content_features[CONTENT_LAYER] = content_net[CONTENT_LAYER]

        if slow:
            preds = tf.Variable(
                tf.random_normal(X_content.get_shape()) * 0.256)
            preds_pre = preds
        else:
            preds = transform.net(X_content / 255.0)
            preds_pre = vgg.preprocess(preds)

        net = vgg.net(vgg_path, preds_pre)

        content_size = _tensor_size(
            content_features[CONTENT_LAYER]) * batch_size
        assert _tensor_size(content_features[CONTENT_LAYER]) == _tensor_size(
            net[CONTENT_LAYER])
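        # squared-error content loss against the VGG features of the input
        # batch, normalized by feature-map size and batch size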
        content_loss = content_weight * (
            2 * tf.nn.l2_loss(net[CONTENT_LAYER] -
                              content_features[CONTENT_LAYER]) / content_size)

        style_losses = []
        for i, style_layer in enumerate(STYLE_LAYERS):
            layer = net[style_layer]
            bs, height, width, filters = map(lambda d: d.value,
                                             layer.get_shape())
            size = height * width * filters
            feats = tf.reshape(layer, (bs, height * width, filters))
            feats_T = tf.transpose(feats, perm=[0, 2, 1])
            grams = tf.matmul(feats_T, feats) / size
            style_gram = style_features[style_layer]
            style_losses.append(2 * prop[i] *
                                tf.nn.l2_loss(grams - style_gram) /
                                style_gram.size)

        style_loss = style_weight * functools.reduce(tf.add,
                                                     style_losses) / batch_size

        # total variation denoising
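        # (penalizes differences between vertically and horizontally adjacent
        # pixels of the prediction, encouraging spatially smooth output)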
        tv_y_size = _tensor_size(preds[:, 1:, :, :])
        tv_x_size = _tensor_size(preds[:, :, 1:, :])
        y_tv = tf.nn.l2_loss(preds[:, 1:, :, :] -
                             preds[:, :batch_shape[1] - 1, :, :])
        x_tv = tf.nn.l2_loss(preds[:, :, 1:, :] -
                             preds[:, :, :batch_shape[2] - 1, :])
        tv_loss = tv_weight * 2 * (x_tv / tv_x_size +
                                   y_tv / tv_y_size) / batch_size

        # tamura loss
        pred_fcrs = tamura.coarseness(preds_pre)
        pred_fcon = tamura.contrast(preds_pre)
        pred_fdir = tamura.directionality(preds_pre)
        fcrs_loss = fcrs_weight * 2 * tf.nn.l2_loss(pred_fcrs - style_fcrs)
        fcon_loss = fcon_weight * 2 * tf.nn.l2_loss(pred_fcon - style_fcon)
        fdir_loss = fdir_weight * 2 * tf.nn.l2_loss(pred_fdir - style_fdir)

        # overall loss
        loss = content_loss + style_loss + tv_loss + fcrs_loss + fcon_loss + fdir_loss

        # learning-rate schedule; NOTE: the decayed lr and global_step are only
        # reported below -- the optimizer is built with the fixed learning_rate,
        # and add_global is never run in the training loop
        global_step = tf.Variable(0, trainable=False)
        add_global = global_step.assign_add(1)
        lr = tf.train.exponential_decay(learning_rate,
                                        global_step=global_step,
                                        decay_steps=10,
                                        decay_rate=0.9)
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)
        sess.run(tf.global_variables_initializer())
        import random
        uid = random.randint(1, 100)
        print("UID: %s" % uid)
        for epoch in range(epochs):
            #sess.run(add_global)
            num_examples = len(content_targets)
            iterations = 0
            while iterations * batch_size < num_examples:
                start_time = time.time()
                curr = iterations * batch_size
                step = curr + batch_size
                X_batch = np.zeros(batch_shape, dtype=np.float32)
                for j, img_p in enumerate(content_targets[curr:step]):
                    X_batch[j] = get_img(img_p,
                                         (256, 256, 3)).astype(np.float32)

                iterations += 1
                assert X_batch.shape[0] == batch_size

                feed_dict = {X_content: X_batch}

                train_step.run(feed_dict=feed_dict)
                end_time = time.time()
                delta_time = end_time - start_time
                if debug:
                    print("UID: %s, batch time: %s" % (uid, delta_time))
                is_print_iter = iterations % print_iterations == 0
                if slow:
                    is_print_iter = epoch % print_iterations == 0
                is_last = epoch == epochs - 1 and iterations * batch_size >= num_examples
                should_print = is_print_iter or is_last
                if should_print:
                    to_get = [
                        style_loss, content_loss, tv_loss, fcrs_loss,
                        fcon_loss, fdir_loss, loss, preds, lr, global_step
                    ]
                    test_feed_dict = {X_content: X_batch}

                    tup = sess.run(to_get, feed_dict=test_feed_dict)
                    _style_loss, _content_loss, _tv_loss, _fcrs_loss, _fcon_loss, _fdir_loss, _loss, _preds, _lr, _global_step = tup
                    losses = (_style_loss, _content_loss, _tv_loss, _fcrs_loss,
                              _fcon_loss, _fdir_loss, _loss)
                    if slow:
                        _preds = vgg.unprocess(_preds)
                    else:
                        saver = tf.train.Saver()
                        res = saver.save(sess, save_path)
                    yield (_preds, losses, iterations, epoch, _lr,
                           _global_step)
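
optimize() is a generator: it trains the transform network and periodically
yields the current predictions, the individual losses, and the schedule state.
A hypothetical driver loop (all weights and paths below are placeholders, not
values from the original code):

for preds, losses, iteration, epoch, lr, step in optimize(
        content_targets, style_target,
        content_weight=7.5, style_weight=100.0, tv_weight=200.0,
        fcrs_weight=1.0, fcon_weight=1.0, fdir_weight=1.0,
        vgg_path='data/imagenet-vgg-verydeep-19.mat'):
    _style, _content, _tv, _fcrs, _fcon, _fdir, _total = losses
    print('epoch %d, iteration %d, total loss %g' % (epoch, iteration, _total))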