Example #1
def get_model_and_optimizer(opts):
    """ Load the model and optimizer """
    m_step = GMM_M_Step

    if not opts.eval_mode:
        e_step = CNN(opts)
        e_step.build(input_shape=(opts.img_size, opts.img_size, 1))
        opt = tf.optimizers.Adam(0.0001, epsilon=1e-7)
    else:
        e_step = tf.saved_model.load(opts.load_path)
        opt = None

    return e_step, m_step, opt
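A minimal usage sketch, assuming an opts namespace exposing the eval_mode, img_size, and load_path fields referenced above (the concrete values below are placeholders):

from argparse import Namespace

opts = Namespace(eval_mode=False, img_size=64, load_path='checkpoints/saved_model')
e_step, m_step, opt = get_model_and_optimizer(opts)
# In training mode e_step is a freshly built CNN and opt is an Adam optimizer;
# with eval_mode=True, e_step is the restored SavedModel and opt is None.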
Example #2
    def generate_map(image):
        image = load_single_image(image)
        hyper = HyperParams(verbose=False)
        images_tf = tf.placeholder(
            tf.float32, [None, hyper.image_h, hyper.image_w, hyper.image_c],
            name="images")
        class_tf = tf.placeholder(tf.int64, [None], name='class')
        cnn = CNN()
        if hyper.fine_tuning:
            cnn.load_vgg_weights()

        conv_last, gap, class_prob = cnn.build(images_tf)
        classmap = cnn.get_classmap(class_tf, conv_last)

        with tf.Session() as sess:
            tf.train.Saver().restore(sess, hyper.model_path)
            conv_last_val, class_prob_val = sess.run(
                [conv_last, class_prob], feed_dict={images_tf: image})

            # use argsort instead of argmax to get all the classes
            class_predictions_all = class_prob_val.argsort(axis=1)

            roi_map = None
            for i in range(-1 * hyper.top_k, 0):

                current_class = class_predictions_all[:, i]
                classmap_vals = sess.run(classmap,
                                         feed_dict={
                                             class_tf: current_class,
                                             conv_last: conv_last_val
                                         })
                normalized_classmap = normalize(classmap_vals[0])

                if roi_map is None:
                    roi_map = 1.2 * normalized_classmap
                else:
                    # running average ("exponential ranking"): each step halves the
                    # weight of earlier maps, so higher-ranked classes contribute more
                    roi_map = (roi_map + normalized_classmap) / 2
            roi_map = normalize(roi_map)

        # Plot the heatmap on top of image
        fig, ax = plt.subplots(1, 1, figsize=(12, 9))
        ax.margins(0)
        plt.axis('off')
        plt.imshow(roi_map, cmap=plt.cm.jet, interpolation='nearest')
        plt.imshow(image[0], alpha=0.4)

        # save the plot and the map
        if not os.path.exists('static/output'):
            os.makedirs('static/output')
        plt.savefig('static/output/overlayed_heatmap.png')
        skimage.io.imsave('static/output/msroi_map.jpg', roi_map)
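The normalize helper is not shown in these snippets; a plausible min-max implementation, offered only as an assumption, is:

import numpy as np

def normalize(arr):
    # Assumed behavior: min-max scale a class activation map into [0, 1];
    # the project's real helper may clip or rescale differently.
    arr = np.asarray(arr, dtype=np.float32)
    lo, hi = arr.min(), arr.max()
    return (arr - lo) / (hi - lo + 1e-8)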
Example #3
def compression_engine(img):

    image = load_single_image(img)

    print("INPUT IMAGE ARRAY ",image.shape)

    hyper = HyperParams(verbose=False)
    images_tf = tf.placeholder(tf.float32, [None, hyper.image_h, hyper.image_w, hyper.image_c], name="images")
    class_tf  = tf.placeholder(tf.int64, [None], name='class')

    cnn = CNN()
    if hyper.fine_tuning:
        cnn.load_vgg_weights()

    conv_last, gap, class_prob = cnn.build(images_tf)
    classmap = cnn.get_classmap(class_tf, conv_last)

    with tf.Session() as sess:
        tf.train.Saver().restore( sess, hyper.model_path )
        conv_last_val, class_prob_val = sess.run([conv_last, class_prob], feed_dict={images_tf: image})

        # use argsort instead of argmax to get all the classes
        class_predictions_all = class_prob_val.argsort(axis=1)

        roi_map = None
        for i in range(-1 * hyper.top_k,0):

            current_class = class_predictions_all[:,i]
            classmap_vals = sess.run(classmap, feed_dict={class_tf: current_class, conv_last: conv_last_val})
            normalized_classmap = normalize(classmap_vals[0])

            if roi_map is None:
                roi_map = 1.2 * normalized_classmap
            else:
                # simple exponential ranking
                roi_map = (roi_map + normalized_classmap)/2
        roi_map = normalize(roi_map)

    # save the plot and the map
    skimage.io.imsave('msroi_map.jpg', roi_map)

    original = Image.open(img)

    #print("ORIGINAL : ",original)
    sal = Image.open('msroi_map.jpg')

    make_quality_compression(original,sal,img,original)
Example #4
len_test = len(data_test)
train_b_num = int(math.ceil(len_train / tparam.batch_size))
test_b_num = int(math.ceil(len_test / tparam.batch_size))
images_tf = tf.placeholder(tf.float32,
                           [None, hyper.image_h, hyper.image_w, hyper.image_c],
                           name="images")
if hyper.sparse:
    labels_tf = tf.placeholder(tf.int64, [None], name='labels')
else:
    labels_tf = tf.placeholder(tf.int64, [None, hyper.n_labels], name='labels')

cnn = CNN()
if hyper.fine_tuning:
    cnn.load_vgg_weights()

_, _, prob_tf = cnn.build(images_tf)
if hyper.sparse:
    loss_tf = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=prob_tf, labels=labels_tf))
else:
    loss_tf = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(
            logits=prob_tf, labels=labels_tf))
train_loss = tf.summary.scalar("training_loss", loss_tf)
test_loss = tf.summary.scalar("validation_loss", loss_tf)

if tparam.optimizer == 'Adam':
    optimizer = tf.train.AdamOptimizer(tparam.learning_rate, epsilon=0.1)
elif tparam.optimizer == 'Ftrl':
    optimizer = tf.train.FtrlOptimizer(tparam.learning_rate)
elif tparam.optimizer == 'Rmsprop':
    optimizer = tf.train.RMSPropOptimizer(tparam.learning_rate)
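The snippet ends before the train op is built; a minimal continuation under the usual TF1 pattern (next_batch is a hypothetical batching helper, not from the source):

train_op = optimizer.minimize(loss_tf)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for b in range(train_b_num):
        batch_images, batch_labels = next_batch(b)  # hypothetical helper
        _, loss_val = sess.run([train_op, loss_tf],
                               feed_dict={images_tf: batch_images,
                                          labels_tf: batch_labels})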
Example #5
def compression_engine(img):

    image = load_single_image(img)

    #print("INPUT IMAGE ARRAY ",image.shape)

    hyper = HyperParams(verbose=False)
    images_tf = tf.placeholder(tf.float32, [None, hyper.image_h, hyper.image_w, hyper.image_c], name="images")
    class_tf  = tf.placeholder(tf.int64, [None], name='class')

    cnn = CNN()
    if hyper.fine_tuning:
        cnn.load_vgg_weights()

    conv_last, gap, class_prob = cnn.build(images_tf)
    classmap = cnn.get_classmap(class_tf, conv_last)

    with tf.Session() as sess:
        tf.train.Saver().restore( sess, hyper.model_path )
        conv_last_val, class_prob_val = sess.run([conv_last, class_prob], feed_dict={images_tf: image})

        # use argsort instead of argmax to get all the classes
        class_predictions_all = class_prob_val.argsort(axis=1)

        roi_map = None
        for i in range(-1 * hyper.top_k,0):

            current_class = class_predictions_all[:,i]
            classmap_vals = sess.run(classmap, feed_dict={class_tf: current_class, conv_last: conv_last_val})
            normalized_classmap = normalize(classmap_vals[0])

            if roi_map is None:
                roi_map = 1.2 * normalized_classmap
            else:
                # simple exponential ranking
                roi_map = (roi_map + normalized_classmap)/2
        roi_map = normalize(roi_map)


    # Plot the heatmap on top of image
    fig, ax = plt.subplots(1, 1, figsize=(12, 9))
    ax.margins(0)
    plt.axis('off')
    plt.imshow( roi_map, cmap=plt.cm.jet, interpolation='nearest' )
    plt.imshow( image[0], alpha=0.4)

    # save the plot and the map
    if not os.path.exists('output'):
        os.makedirs('output')
    plt.savefig('output/overlayed_heatmap.png')
    skimage.io.imsave( 'msroi_map.jpg', roi_map )
    plt.clf()
    print("MSROI TYPE : ",type(roi_map))
    plt.close()

    from glob import glob
    # make the output directory to store the Q level images,

    if not os.path.exists(output_directory):
        os.makedirs(output_directory)


    original = Image.open(img)

    #print("ORIGINAL : ",original)
    sal = Image.open('msroi_map.jpg')

    out_name = make_quality_compression(original,sal,img,original)

    return out_name
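A usage sketch; the input path is illustrative:

# Writes output/overlayed_heatmap.png and msroi_map.jpg, then returns the name of
# the quality-compressed file produced by make_quality_compression.
compressed_name = compression_engine('input/sample.jpg')
print(compressed_name)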
Example #6
                                                  random_state=46)

# Generate batches of tensor image data with real-time data augmentation.
# The data will be looped over (in batches).
aug_data = ImageDataGenerator(rotation_range=25,
                              width_shift_range=0.1,
                              height_shift_range=0.1,
                              shear_range=0.2,
                              zoom_range=0.2,
                              horizontal_flip=True,
                              fill_mode="nearest")

# build the model from the CNN class in model.py
model = CNN.build(width=image_dims[1],
                  height=image_dims[0],
                  depth=image_dims[2],
                  classes=len(mlb.classes_),
                  activation="sigmoid")

print("Loading model and compiling...")
# compile the model ready to fit
optimiser = Adam(lr=init_learning, decay=init_learning / epochs)
model.compile(loss="binary_crossentropy",
              optimizer=optimiser,
              metrics=["accuracy"])

print("begin training...")
# begin training
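# The fit call itself is not included in this snippet. A sketch of the usual
# Keras pattern, assuming train/validation splits named trainX/trainY/testX/testY
# and a batch_size variable (these names are assumptions, not from the source):
# model.fit_generator(
#     aug_data.flow(trainX, trainY, batch_size=batch_size),
#     validation_data=(testX, testY),
#     steps_per_epoch=len(trainX) // batch_size,
#     epochs=epochs, verbose=1)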

# from keras.utils import plot_model
# plot_model(model, to_file='model.png')
Example #7
from model import CNN
from params import HyperParams
import skimage.io


import tensorflow.compat.v1 as tf
tf.disable_v2_behavior() 
hyper = HyperParams(verbose=False)
images_tf = tf.placeholder(tf.float32, [None, hyper.image_h, hyper.image_w, hyper.image_c], name="images")
class_tf  = tf.placeholder(tf.int64, [None], name='class')

cnn = CNN()
if hyper.fine_tuning: 
    cnn.load_vgg_weights()

conv_last, gap, class_prob = cnn.build(images_tf)
classmap = cnn.get_classmap(class_tf, conv_last)

with tf.Session() as sess:
    tf.train.Saver().restore( sess, hyper.model_path )
    conv_last_val, class_prob_val = sess.run([conv_last, class_prob], feed_dict={images_tf: image})

    # use argsort instead of argmax to get all the classes
    class_predictions_all = class_prob_val.argsort(axis=1)

    roi_map = None
    for i in range(-1 * hyper.top_k,0):
        current_class = class_predictions_all[:,i]
        classmap_vals = sess.run(classmap, feed_dict={class_tf: current_class, conv_last: conv_last_val})
        normalized_classmap = normalize(classmap_vals[0])
        
Example #8
File: play.py Project: salty-vanilla/tf-rl
    anim.save(str(dst_path), writer=writer)


yml_path = sys.argv[1]
with open(yml_path) as f:
    config = yaml.safe_load(f)
logdir = Path(config['logdir'])

env = CartPoleEnv(**config['env_params'])

if config['env_params']['state_mode'] == 'image':
    model = CNN(env.action_space.n)
else:
    model = MLP(env.action_space.n)

model.build((None, *env.get_state_shape()))
model.load_weights(str(logdir / 'model' /
                       f'model_{config["test_episode"]}.h5'))

state = env.reset()
frames = [env.render(mode='rgb_array')]

rewards = 0
for t in count():
    state = tf.constant(state, dtype=tf.float32)
    action = tf.argmax(model(tf.expand_dims(state, 0)), axis=1)
    action = tf.reshape(action, ())
    action = np.array(action)
    next_state, reward, done, _ = env.step(action)
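    # Assumed continuation (the snippet is truncated here): record the new frame,
    # accumulate reward, advance the state, and stop when the episode ends.
    frames.append(env.render(mode='rgb_array'))
    rewards += reward
    state = next_state
    if done:
        break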