Example #1
def val(data_path, image_upsampler, wm_upsampler, blender, downsampler, extrator, summ):
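    # Build the validation graph: upsample the test images and the watermark,
    # blend them, downsample the result, then re-extract the watermark and
    # measure regression losses against the original images and watermark.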
    data_file = os.path.join(data_path, 'test_images.tfr')
    wm_file = os.path.join(data_path, 'watermark.mat')
    assert os.path.isfile(data_file), "Invalid file name"
    assert os.path.isfile(wm_file), "Invalid file name"

    input_ = Input(FLAGS.batch_size, [FLAGS.img_height, FLAGS.img_width, FLAGS.num_chans])
    images = input_(data_file)
    wm = Watermark(wm_file)()

    image_upsampled = image_upsampler(images)
    wm_upsampled = wm_upsampler(wm)
    image_blended = blender(image_upsampled, wm_upsampled)
    image_downsampled = downsampler(image_blended)
    wm_extracted = extrator(image_downsampled)

    image_loss = LossRegression()(image_downsampled, images)
    wm_loss = LossRegression()(wm_extracted, wm)

    summ.register('val', 'image_loss', image_loss)
    summ.register('val', 'wm_loss', wm_loss)

    val_summ_op = summ('val')

    return val_summ_op
Example #2
def test():
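    # Smoke test for the watermark pipeline: wires the upsamplers, blender,
    # downsampler and extractor together, runs one step in a session and
    # prints the shape of the extracted watermark.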
    from config import FLAGS
    from input_ import Input, Watermark

    dim = [
        FLAGS.train_batch_size, FLAGS.img_height, FLAGS.img_width,
        FLAGS.num_chans
    ]
    image_upsampler = Upsampler(dim)
    wm_upsampler = Upsampler([1] + dim[1:])
    downsampler = Downsampler(dim, factor=4)
    blender = Blender(dim)
    extrator = Extractor(dim)

    input_ = Input(FLAGS.batch_size,
                   [FLAGS.img_height, FLAGS.img_width, FLAGS.num_chans])
    images = input_('/data/yuming/watermark-data/train_images.tfr')
    wm = Watermark('/data/yuming/watermark-data/watermark.mat')()

    image_upsampled = image_upsampler(images)
    wm_upsampled = wm_upsampler(wm)
    image_blended = blender(image_upsampled, wm_upsampled)
    image_downsampled = downsampler(image_blended)
    wm_extracted = extrator(image_downsampled)

    writer = tf.summary.FileWriter("model-output", tf.get_default_graph())

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        v = sess.run(wm_extracted)

        print(v.shape)

    writer.close()
Example #3
def train(data_path, image_upsampler, wm_upsampler, blender, downsampler, extrator, summ):
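    # Build the training graph: same pipeline as the validation graph, but with
    # the downsampler and extractor in training mode; the combined image and
    # watermark regression losses are minimized with the Adam wrapper.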
    data_file = os.path.join(data_path, 'train_images.tfr')
    wm_file = os.path.join(data_path, 'watermark.mat')
    assert os.path.isfile(data_file), "Invalid file name"
    assert os.path.isfile(wm_file), "Invalid file name"

    input_ = Input(FLAGS.batch_size, [FLAGS.img_height, FLAGS.img_width, FLAGS.num_chans])
    images = input_(data_file)
    wm = Watermark(wm_file)()

    image_upsampled = image_upsampler(images)
    wm_upsampled = wm_upsampler(wm)
    image_blended = blender(image_upsampled, wm_upsampled)
    image_downsampled = downsampler(image_blended, training = True)
    wm_extracted = extrator(image_downsampled, training = True) 

    image_loss = LossRegression()(image_downsampled, images)
    wm_loss = LossRegression()(wm_extracted, wm)

    opt = Adam(FLAGS.learning_rate, lr_decay = FLAGS.lr_decay, lr_decay_steps = FLAGS.lr_decay_steps,
               lr_decay_factor = FLAGS.lr_decay_factor)

    train_op = opt(image_loss + wm_loss)

    summ.register('train', 'image_loss', image_loss)
    summ.register('train', 'wm_loss', wm_loss)

    train_summ_op = summ('train')

    return image_loss + wm_loss, train_op, train_summ_op
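
For illustration, a minimal TF1-style driver sketch (not part of the original sources) showing how the tensors returned by train() above might be consumed. The Upsampler/Downsampler/Blender/Extractor constructors follow Example #2; the Summaries() helper, the run_training name, and the num_iters argument are hypothetical.

def run_training(data_path, num_iters=1000):
    # Hypothetical summary helper exposing the register(...) / __call__
    # interface used throughout these examples.
    summ = Summaries()

    dim = [FLAGS.train_batch_size, FLAGS.img_height, FLAGS.img_width, FLAGS.num_chans]
    loss, train_op, train_summ_op = train(
        data_path,
        Upsampler(dim),            # image upsampler
        Upsampler([1] + dim[1:]),  # watermark upsampler
        Blender(dim),
        Downsampler(dim, factor=4),
        Extractor(dim),
        summ)

    writer = tf.summary.FileWriter("model-output", tf.get_default_graph())
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(num_iters):
            loss_val, _, summ_str = sess.run([loss, train_op, train_summ_op])
            writer.add_summary(summ_str, step)
            if step % 100 == 0:
                print("step {}: loss {}".format(step, loss_val))
    writer.close()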
Example #4
def xval(model, data_path, summ):
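    # Build the cross-validation graph: run the model in inference mode and
    # compute classification accuracy, registered under the 'xval' summary tag.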

    input_ = Input(FLAGS.xval_batch_size, [FLAGS.img_height, FLAGS.img_width, FLAGS.num_channels])

    images, labels = input_(data_path)
    logits = model(images, training = False)

    xval_accu_op = XValClassification(gpu = True)(logits, labels)

    summ.register('xval', 'xval_accuracy', xval_accu_op)
    xval_summ_op = summ('xval')

    return xval_accu_op, xval_summ_op
Example #5
def val(data_path, model, summ):
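    # Validation graph for a regression model on [num_chans, num_points] data;
    # labels are expanded to match the trailing dimension of the logits.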
    input_ = Input(FLAGS.batch_size, [FLAGS.num_chans, FLAGS.num_points])
    data, labels = input_(data_path)
    '''
    print_op = tf.print("In validation procedure -> label shape: ", tf.shape(labels), output_stream = sys.stdout)
    with tf.control_dependencies([print_op]):
        logits = model(data) 
    '''

    logits = model(data)
    loss = LossRegression()(logits, tf.expand_dims(labels, axis=-1))

    summ.register('val', 'loss', loss)

    val_summ_op = summ('val')

    return val_summ_op
Example #6
def train(model, data_path, cur_iter, summ):
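    # Training graph for image classification: the LossClassification loss is
    # minimized with the Adam wrapper, and the loss and learning rate are
    # exposed as 'train' summaries.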
    input_ = Input(FLAGS.train_batch_size, [FLAGS.img_height, FLAGS.img_width, FLAGS.num_channels])

    images, labels = input_(data_path)
    logits = model(images, cur_iter = cur_iter)

    # Calculate the loss of the model.
    loss = LossClassification(FLAGS.num_classes, gpu = True)(logits, labels)

    # Create an Adam optimizer (learning-rate decay disabled here).
    optimizer = Adam(FLAGS.learning_rate, lr_decay = False, 
                     lr_decay_steps = FLAGS.lr_decay_steps,
                     lr_decay_factor = FLAGS.lr_decay_factor)

    train_op = optimizer(loss)

    summ.register('train', 'train_loss', loss)
    summ.register('train', 'learning_rate', optimizer.lr)

    train_summ_op = summ('train')

    return loss, train_op, train_summ_op
Example #7
def train(data_path, model, summ):
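    # Training graph for the regression model: same input pipeline as the
    # validation graph in Example #5, plus an Adam optimizer with learning-rate
    # decay controlled by FLAGS.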
    input_ = Input(FLAGS.batch_size, [FLAGS.num_chans, FLAGS.num_points])
    data, labels = input_(data_path)
    ''' 
    print_op = tf.print("In train procedure -> label shape: ", tf.shape(labels), output_stream = sys.stdout)
    with tf.control_dependencies([print_op]):
        logits = model(data)
    '''

    logits = model(data)
    loss = LossRegression()(logits, tf.expand_dims(labels, axis=-1))

    opt = Adam(FLAGS.learning_rate,
               lr_decay=FLAGS.lr_decay,
               lr_decay_steps=FLAGS.lr_decay_steps,
               lr_decay_factor=FLAGS.lr_decay_factor)

    train_op = opt(loss)

    summ.register('train', 'loss', loss)
    summ.register('train', 'learning_rate', opt.lr)
    train_summ_op = summ('train')

    return loss, train_op, train_summ_op
Example #8
def main(unused_argv):
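    # Evaluation script: restores the latest checkpoint, runs the test set one
    # sample at a time, and logs every misclassified sample (per-channel image
    # plus a text description) to TensorBoard under "output-outlier".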
    tf.logging.set_verbosity(tf.logging.ERROR)

    # tf.logging.set_verbosity(3)  # Print INFO log messages.

    summaries = []
    assert FLAGS.test_size > 0, 'test_size must be positive'

    if FLAGS.data_dir == '' or not os.path.exists(FLAGS.data_dir):
        raise ValueError('invalid data directory {}'.format(FLAGS.data_dir))

    test_data_path = os.path.join(FLAGS.data_dir, 'quantum-test.tfr')

    if FLAGS.output_dir == '':
        raise ValueError('invalid output directory {}'.format(
            FLAGS.output_dir))

    if FLAGS.data_type == "momentum":
        filenames_datafile = '/scratch/quantum-meta/filenames_transition_momentum.dat'
        warnings.warn(
            "Check the data file containing file names: {}".format(
                filenames_datafile), UserWarning)
        with open(filenames_datafile, 'rb') as f:
            filenames = pickle.load(f)
    elif FLAGS.data_type == "position":
        filenames_datafile = '/scratch/quantum-meta/filenames_transition_position.dat'
        warnings.warn(
            "Check the data file containing file names: {}".format(
                filenames_datafile), UserWarning)
        with open(filenames_datafile, 'rb') as f:
            filenames = pickle.load(f)
    else:
        print("invalid data type {}".format(FLAGS.data_type))
        return

    test_filenames = filenames['test_filenames']

    checkpoint_dir = os.path.join(FLAGS.output_dir, '')

    test_input = Input(1,
                       [FLAGS.img_height, FLAGS.img_width, FLAGS.num_channels])

    inputs, labels = test_input(test_data_path)

    model = Model(act=FLAGS.activation,
                  pool=FLAGS.pooling,
                  with_memory=FLAGS.with_memory,
                  log=False)

    logits = model(inputs, training=False)

    logit_indices = tf.argmax(logits, axis=-1)

    # Define the metric and update operations
    metric_op, metric_update_op = tf.metrics.accuracy(labels, logit_indices)

    saver = tf.train.Saver(tf.trainable_variables())

    with tf.Session() as sess:
        # sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
        sess.run([tf.local_variables_initializer()])

        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint.
            saver.restore(sess, ckpt.model_checkpoint_path)

            # Assuming model_checkpoint_path looks something like:
            #   /my-favorite-path/imagenet_train/model.ckpt-0,
            # extract global_step from it.
            # global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            # print('Successfully loaded model from %s at step = %s.' %
            #       (ckpt.model_checkpoint_path, global_step))

            print('Successfully loaded model from %s.' %
                  ckpt.model_checkpoint_path)

        else:
            print('No checkpoint file found')
            return

        logits_lst = []
        labels_lst = []
        indices_lst = []

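        # Placeholders for re-feeding a misclassified sample so it can be
        # written out as image/text summaries.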
        image_data_placeholder = tf.placeholder(
            tf.float32,
            [FLAGS.img_height, FLAGS.img_width, FLAGS.num_channels])
        image_descr_placeholder = tf.placeholder(tf.string, [],
                                                 name="image_descr")

        image_data = tf.unstack(image_data_placeholder, axis=-1)

        # Min-max normalize each channel slice to [0, 1] so it displays
        # sensibly as an image summary.
        image_data = [(data - tf.reduce_min(data)) /
                      (tf.reduce_max(data) - tf.reduce_min(data))
                      for data in image_data]

        image_data = [tf.expand_dims(data, axis=-1) for data in image_data]
        image_data = [tf.expand_dims(data, axis=0) for data in image_data]

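        # One image summary per channel (with a matching text summary) for
        # flagged misclassifications.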
        add_img_op = [
            tf.summary.image(
                "misclassified image data {}th component".format(i),
                image_data[i]) for i in range(FLAGS.num_channels)
        ]
        add_txt_op = [
            tf.summary.text(
                "misclassified image description {}th component".format(i),
                image_descr_placeholder) for i in range(FLAGS.num_channels)
        ]

        add_summ = tf.summary.merge_all()

        writer = tf.summary.FileWriter("output-outlier",
                                       tf.get_default_graph())

        logit_indices = tf.squeeze(logit_indices)
        labels = tf.squeeze(labels)

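        # Stream the test set one example at a time, updating the accuracy
        # metric and recording any misclassification.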
        for i in range(FLAGS.test_size):
            _, images_val, logits_val, indices_val, labels_val = sess.run(
                [metric_update_op, inputs, logits, logit_indices, labels])
            logits_lst.append(logits_val)
            labels_lst.append(labels_val)
            indices_lst.append(indices_val)

            # print("{}: ground-truth label: {}, predicted label: {}".format(i, labels_val, indices_val))

            if indices_val != labels_val:
                print("{}: ground-truth {}, predicted {}".format(
                    i, labels_val, indices_val))

                filename = test_filenames[i]
                filename = filename.split('\\')[-1]
                print("'{}'".format(filename))

                summ_str = sess.run(
                    add_summ,
                    feed_dict={
                        image_data_placeholder:
                        np.squeeze(images_val, axis=0),
                        image_descr_placeholder:
                        "{}: ground-truth {}, predicted {}".format(
                            filename, labels_val, indices_val)
                    })
                writer.add_summary(summ_str, i)

        accu = sess.run(metric_op)
        print("accu -> {}".format(accu))

        # io.savemat("logits.mat", {"logits": logits_lst, 'classes': labels_lst, 'indices': indices_lst})

        writer.close()
Example #9
def main(unused_argv):
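    # Evaluation script: restores the latest checkpoint, computes test-set
    # accuracy, dumps logits/labels/predictions to tsne.mat, then calls a
    # MATLAB routine to report per-class accuracy.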
    tf.logging.set_verbosity(tf.logging.ERROR)

    # tf.logging.set_verbosity(3)  # Print INFO log messages.

    summaries = []
    assert FLAGS.test_size > 0, 'test_size must be positive'

    if FLAGS.data_dir == '' or not os.path.exists(FLAGS.data_dir):
        raise ValueError('invalid data directory {}'.format(FLAGS.data_dir))

    test_data_path = os.path.join(FLAGS.data_dir, 'quantum-test.tfr')

    if FLAGS.output_dir == '':
        raise ValueError('invalid output directory {}'.format(
            FLAGS.output_dir))

    checkpoint_dir = os.path.join(FLAGS.output_dir, '')

    test_input = Input(1,
                       [FLAGS.img_height, FLAGS.img_width, FLAGS.num_channels])

    inputs, labels = test_input(test_data_path)

    model = Model(act=FLAGS.activation,
                  pool=FLAGS.pooling,
                  with_memory=FLAGS.with_memory,
                  log=False)

    logits = model(inputs, training=False)

    logit_indices = tf.argmax(logits, axis=-1)

    # Define the metric and update operations
    metric_op, metric_update_op = tf.metrics.accuracy(labels, logit_indices)

    saver = tf.train.Saver(tf.trainable_variables())

    with tf.Session() as sess:
        # sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
        sess.run([tf.local_variables_initializer()])

        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint.
            saver.restore(sess, ckpt.model_checkpoint_path)

            # Assuming model_checkpoint_path looks something like:
            #   /my-favorite-path/imagenet_train/model.ckpt-0,
            # extract global_step from it.
            # global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            # print('Successfully loaded model from %s at step = %s.' %
            #       (ckpt.model_checkpoint_path, global_step))

            print('Successfully loaded model from %s.' %
                  ckpt.model_checkpoint_path)

        else:
            print('No checkpoint file found')
            return

        logits_lst = []
        labels_lst = []
        indices_lst = []

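        # Run the whole test set one example at a time, accumulating logits,
        # ground-truth labels and predicted indices.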
        for i in range(FLAGS.test_size):
            _, logits_val, indices_val, labels_val = sess.run(
                [metric_update_op, logits, logit_indices, labels])
            logits_lst.append(logits_val)
            labels_lst.append(labels_val)
            indices_lst.append(indices_val)

        accu = sess.run(metric_op)
        print("accu -> {}".format(accu))

        io.savemat("tsne.mat", {
            "logits": logits_lst,
            'classes': labels_lst,
            'indices': indices_lst
        })

        eng = matlab.engine.start_matlab("-nodisplay -nosplash")
        accus = eng.calc_class_accu(
            matlab.int32(np.squeeze(labels_lst).tolist()),
            matlab.int32(np.squeeze(indices_lst).tolist()))
        eng.quit()

        for i, a in enumerate(np.squeeze(np.array(accus)).tolist()):
            print("C = {}: {}".format(i - 2, a))