Example #1
 def build_model(self, input_images, vgg_path):
     self.train_mode = tf.placeholder(tf.bool)
     self.vgg = vgg19.Vgg19(vgg_path,
                            dropout=self.keep_prob,
                            trainable=True)
     self.vgg.build(input_images, self.train_mode)
     self.output = self.vgg.relu7
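Example #1 only wires the net into a larger class. A self-contained sketch of the same pattern, assuming the tensorflow-vgg style vgg19_trainable module and ./vgg19.npy weight file used throughout these examples:

import numpy as np
import tensorflow as tf
import vgg19_trainable as vgg19

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
train_mode = tf.placeholder(tf.bool)
vgg = vgg19.Vgg19('./vgg19.npy', dropout=0.5, trainable=True)
vgg.build(inputs, train_mode)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # read relu7 activations as 4096-d image features
    feats = sess.run(vgg.relu7,
                     feed_dict={inputs: np.zeros((2, 224, 224, 3), np.float32),
                                train_mode: False})
    print(feats.shape)  # expected: (2, 4096)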
Example #2
def evaluate(test_data, config):
    print("Building net")

    net = vgg19_trainable.Vgg19(vgg19_npy_path=load_weights(config),
                                trainable=True)
    with tf.device('/gpu:0'):
        sess = tf.Session()
        net.build()
        sess.run(tf.global_variables_initializer())
        _evaluate(net, sess, test_data, config)
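The _evaluate helper referenced here (and in Examples #4 and #9) is not shown. A purely illustrative sketch, assuming the data object yields (images, labels, reset) batches as in the training loop of Example #9, and that the net exposes a forward call (net.test below is an assumption, mirroring the net.train call Example #9 uses):

import numpy as np

def _evaluate(net, sess, test_data, config, epoch=None):
    correct, total = 0, 0
    for _ in range(int(test_data.size / config.batch_size)):
        images, labels, _ = test_data.next_batch(config.batch_size)
        logits = net.test(images, sess)  # hypothetical counterpart of net.train
        correct += np.sum(np.argmax(logits, axis=1) == labels)
        total += len(labels)
    print("EVAL accuracy: {:.4f}".format(correct / float(total)))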
def main():
    basepath = TEST_DATA
    trainpath = TRAIN_DATA
    bakpath = "bak"

    parser = argparse.ArgumentParser()
    parser.add_argument("--path",
                        help="读取某个文件夹下的所有图像文件,default=" + TEST_DATA,
                        default=TEST_DATA)
    parser.add_argument("--tpath",
                        help="读取训练图像数据,default=" + TRAIN_DATA,
                        default=TRAIN_DATA)
    args = parser.parse_args()

    if args.path: basepath = args.path
    if args.tpath: trainpath = args.tpath

    mkdir(bakpath + "/" + basepath)
    imgdata = loadFrom(basepath)  #20180330
    num = len(imgdata)
    if num == 0:
        utils.printcolor("the number of image files is 0", mode='bold', fore='red')
        return

    per = 5 if num > 5 else num
    count = (num + per - 1) // per  # ceil(num / per)
    print(per, num, count, num % per)

    with tf.device('/cpu:0'):
        sess = tf.Session()

        vgg = vgg19.Vgg19('./vgg19.npy')
        images = tf.placeholder("float", [per, 224, 224, 3])
        train_mode = tf.placeholder(tf.bool)
        vgg.build(images, train_mode)
        print(vgg.get_var_count())
        sess.run(tf.global_variables_initializer())

        results = []
        for i in range(0, num):
            results.append(true_result(728))

        for x in range(0, count):
            xdata = imgdata[x * per:x * per + per]
            result = results[x * per:x * per + per]
            if len(xdata) < per:  # short final slice: reuse the last `per` items
                xdata = imgdata[-per:]
                result = results[-per:]
            tensor_imgdata(sess, xdata, images, train_mode, result, vgg,
                           bakpath)
        vgg.save_npy(sess, './vgg19_train.npy')
Example #4
def evaluate(test_data, config):
    log(config.log_file, "Building net")

    net = vgg19_trainable.Vgg19(vgg19_npy_path=load_weights(config),
                                trainable=True)
    with tf.device('/gpu:0'):
        sess = tf.Session()
        net.build(num_cats=config.num_classes)
        sess.run(tf.global_variables_initializer())
        log(config.log_file, "starting evaluation")
        _evaluate(net, sess, test_data, config)
Example #5
def train1(data):
    sess = tf.Session()
    images = tf.placeholder(tf.float32, [None, 32, 32, 3])
    true_out = tf.placeholder(tf.float32, [None, 1])
    train_mode = tf.placeholder(tf.bool)
    vgg = vgg19.Vgg19()
    vgg.build(images, train_mode)
    print(vgg.get_var_count())
    sess.run(tf.global_variables_initializer())

    for epoch in range(10):
        print('epoch: {}...'.format(epoch))
        x_list, y_list = data.next_batch(100)
        prob = sess.run(vgg.prob,
                        feed_dict={
                            images: x_list,
                            train_mode: False
                        })
        # y_list is assumed to hold integer class ids; the squeeze guards
        # against a [batch, 1] shape silently broadcasting inside tf.equal
        correct_prediction = tf.equal(tf.argmax(prob, 1),
                                      tf.cast(tf.squeeze(y_list), tf.int64))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        accuracy = sess.run(accuracy)
        print('accuracy1= ', accuracy)

        # note: creating these ops inside the loop re-grows the graph every
        # epoch; see the rebuilt version sketched after this example
        cost = tf.reduce_sum((vgg.prob - true_out)**2)
        train = tf.train.GradientDescentOptimizer(0.01).minimize(cost)
        sess.run(train,
                 feed_dict={
                     images: x_list,
                     true_out: y_list,
                     train_mode: True
                 })
        prob = sess.run(vgg.prob,
                        feed_dict={
                            images: x_list,
                            train_mode: False
                        })
        correct_prediction = tf.equal(tf.argmax(prob, 1),
                                      tf.cast(tf.squeeze(y_list), tf.int64))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        accuracy = sess.run(accuracy)
        # print(prob)
        print('accuracy2= ', accuracy)
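The loop above rebuilds the cost, optimizer, and accuracy ops on every epoch, which grows the graph without bound. A sketch of the usual structure, under the same assumption that y_list holds integer class ids in a [batch, 1] array: build the ops once, then only run them inside the loop.

cost = tf.reduce_sum((vgg.prob - true_out) ** 2)
train = tf.train.GradientDescentOptimizer(0.01).minimize(cost)
labels = tf.cast(tf.squeeze(true_out, axis=1), tf.int64)
accuracy = tf.reduce_mean(
    tf.cast(tf.equal(tf.argmax(vgg.prob, 1), labels), tf.float32))
sess.run(tf.global_variables_initializer())

for epoch in range(10):
    x_list, y_list = data.next_batch(100)
    _, acc = sess.run([train, accuracy],
                      feed_dict={images: x_list, true_out: y_list,
                                 train_mode: True})
    print('epoch {}: batch accuracy {:.3f}'.format(epoch, acc))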
Example #6
def extract_features(config, train_data, test_data):
    log(config.log_file, "Building net...")
    with tf.device('/gpu:0'):
        sess = tf.Session()
        net = vgg19_trainable.Vgg19(vgg19_npy_path=load_weights(config),
                                    trainable=True)
        net.build(num_cats=config.num_classes)
        sess.run(tf.global_variables_initializer())
        out_dir = config.data
        log(config.log_file, "extracting test...")
        test_features, test_labels = _extract_features(test_data, net, sess,
                                                       config)
        np.save(os.path.join(out_dir, 'test_features.npy'), test_features)
        np.save(os.path.join(out_dir, 'test_labels.npy'), test_labels)
        log(config.log_file, "extracting train...")
        train_features, train_labels = _extract_features(
            train_data, net, sess, config)
        np.save(os.path.join(out_dir, 'train_features.npy'), train_features)
        np.save(os.path.join(out_dir, 'train_labels.npy'), train_labels)
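As with _evaluate, the _extract_features helper is not part of the snippet. A guess at its shape, assuming a hypothetical net.extract(images, sess) call that returns one feature vector per image:

import numpy as np

def _extract_features(data, net, sess, config):
    feats, labels = [], []
    for _ in range(int(data.size / config.batch_size)):
        images, batch_labels, _ = data.next_batch(config.batch_size)
        feats.append(net.extract(images, sess))  # assumed API
        labels.append(batch_labels)
    return np.concatenate(feats), np.concatenate(labels)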
Example #7
    def create_model(self):

        self.train_mode = tf.placeholder(tf.bool)
        self.inputs2D = tf.placeholder(tf.float32,
                                       shape=[self.batch_size, 224, 224, 3],
                                       name='input2D')
        self.input_onelayers = tf.placeholder(tf.float32,
                                              shape=[self.batch_size, 1000],
                                              name='input_onelayer')

        with tf.variable_scope("generator") as scope:
            self.G = self.generator(self.input_onelayers)

        self.vgg = vgg19.Vgg19('./vgg19.npy')
        self.vgg.build(self.inputs2D, self.train_mode)

        cluster2 = tf.cast(self.clusters, tf.float32)
        self.cluster_L = tf.reduce_mean(
            tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(
                labels=cluster2, logits=self.probability_enc),
                          axis=1))
        self.g_loss = self.cluster_L
        self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
Example #8
def train(num_class):
    with tf.device('/cpu:0'):
        sess = tf.Session()
        train_mode = tf.constant(True, dtype=tf.bool)
        # read the training images and labels
        # files_list = "./train_fvgg_emo.txt"
        filename_queue = tf.train.string_input_producer(["./train_fvgg_emo.txt"])
        reader = tf.TextLineReader()
        filename, value = reader.read(filename_queue)
        # both columns decode as strings; the label is converted below
        image_name, label = tf.decode_csv(value,
                                          record_defaults=[["string"], ["int32"]],
                                          field_delim=" ")

        image_content = tf.read_file(image_name)
        image_data = tf.image.decode_jpeg(image_content, channels=3)
        image = tf.image.resize_images(image_data, [224, 224])
        label = tf.string_to_number(label, tf.int32)

        labels = tf.one_hot(label, num_class, 1, 0, dtype=tf.int32)
        labels = tf.cast(labels, tf.float32)
        image_batch, label_batch = tf.train.batch([image, labels], batch_size=20)

        vgg = vgg19.Vgg19(num_class, './vgg19.npy')
        vgg.build(image_batch, train_mode)

        cost = tf.reduce_sum((vgg.prob - label_batch) ** 2)
        train = tf.train.AdamOptimizer(1e-4).minimize(cost)
        # coord = tf.train.Coordinator()
        # train
        sess.run(tf.global_variables_initializer())
        tf.train.start_queue_runners(sess=sess)
        for i in range(5):
            # fetch the loss value together with the train op so it can be
            # printed (the original printed the Tensor object itself)
            _, loss_val = sess.run([train, cost])
            print("train step %d, loss: %f" % (i, loss_val))
        # save the model
        vgg.save_npy(sess, "./rzc_vgg19.npy")
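The string_input_producer / TextLineReader queue pipeline used above was superseded by tf.data in later TF 1.x releases. An equivalent sketch, assuming the same space-delimited "path label" lines in train_fvgg_emo.txt:

NUM_CLASSES = 10  # stands in for the num_class argument of train()

def _parse_line(line):
    image_name, label = tf.decode_csv(line, record_defaults=[[""], [0]],
                                      field_delim=" ")
    image = tf.image.decode_jpeg(tf.read_file(image_name), channels=3)
    image = tf.image.resize_images(image, [224, 224])
    return image, tf.one_hot(label, NUM_CLASSES)

dataset = (tf.data.TextLineDataset("./train_fvgg_emo.txt")
           .map(_parse_line)
           .batch(20)
           .repeat())
image_batch, label_batch = dataset.make_one_shot_iterator().get_next()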
Example #9
def train(train_data, test_data, config):
    log(config.log_file, "Starting training")
    with tf.device('/gpu:0'):
        sess = tf.Session()

        start_epoch = 0
        WEIGHTS = config.weights
        if WEIGHTS != -1:
            ld = config.log_dir
            start_epoch = WEIGHTS + 1
            ACC_LOGGER.load(
                (os.path.join(ld, "{}_acc_train_accuracy.csv".format(
                    config.name)),
                 os.path.join(ld, "{}_acc_eval_accuracy.csv".format(
                     config.name))),
                epoch=WEIGHTS)
            LOSS_LOGGER.load(
                (os.path.join(ld, "{}_loss_train_loss.csv".format(
                    config.name)),
                 os.path.join(ld, '{}_loss_eval_loss.csv'.format(
                     config.name))),
                epoch=WEIGHTS)

        net = vgg19_trainable.Vgg19(vgg19_npy_path=load_weights(config),
                                    trainable=True)

        log(config.log_file, "Weights loaded")
        net.build(lr=config.lr, num_cats=config.num_classes)
        sess.run(tf.global_variables_initializer())

        it_per_epoch = train_data.size // config.batch_size
        begin = start_epoch
        end = config.max_epoch + start_epoch
        for epoch in range(begin, end + 1):

            accs = []
            losses = []
            for it in range(it_per_epoch):

                batch_images, batch_labels, reset = train_data.next_batch(
                    config.batch_size)
                _, loss, logits = net.train(batch_images, batch_labels, sess)
                acc = np.sum(
                    np.argmax(logits, axis=1) == batch_labels) / float(
                        len(batch_labels))
                accs.append(acc)
                losses.append(loss)

                if it % max(config.train_log_frq // config.batch_size, 1) == 0:
                    loss = np.mean(losses)
                    acc = np.mean(accs)
                    log(
                        config.log_file,
                        "TRAINING epoch: {} it: {}  loss: {} acc: {} ".format(
                            epoch, it, loss, acc))
                    LOSS_LOGGER.log(loss, epoch, "train_loss")
                    ACC_LOGGER.log(acc, epoch, "train_accuracy")

                    ACC_LOGGER.save(config.log_dir)
                    LOSS_LOGGER.save(config.log_dir)
                    ACC_LOGGER.plot(dest=config.log_dir)
                    LOSS_LOGGER.plot(dest=config.log_dir)

            _evaluate(net, sess, test_data, config, epoch=epoch)
            if epoch % config.save_period == 0 or epoch == end:
                net.save_npy(
                    sess,
                    os.path.join(config.log_dir,
                                 config.snapshot_prefix + str(epoch)))

            if epoch > 0 and epoch % config.lr_decay_step == 0:
                net.update_lr(config.lr_decay)
                log(config.log_file,
                    "Updated learning rate to {}".format(net.lr))
Example #10
                               trainable=True,
                               name='bias')
            print "xavier"
        # 全连接层可以使用relu_layer函数比较方便,不用像卷积层使用relu函数
        activation = tf.nn.relu_layer(x, weight, bias, name=name)
        print_layer(activation)
        return activation


# create the model
x = tf.placeholder(tf.float32, shape=[None, WIDTH, HEIGHT, CHANNEL], name="x")
y_ = tf.placeholder(tf.float32, shape=[None, CLASSES], name='y_')
keep_prob = tf.placeholder("float", name="keep_prob")

import vgg19_trainable as vgg19
vgg = vgg19.Vgg19("./vgg19.npy")
vgg.build(x, keep_prob)
y = vgg.prob
tf.add_to_collection('y', y)

# note: y (= vgg.prob) is already a softmax output, while
# softmax_cross_entropy_with_logits expects pre-softmax logits
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))

#train_step = tf.train.GradientDescentOptimizer(LRARNING_RATE).minimize(cross_entropy)
train_step = tf.train.AdamOptimizer(LRARNING_RATE).minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

if __name__ == '__main__':
    if len(sys.argv) > 1 and sys.argv[1] == '1':
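The snippet cuts off here. A minimal loop that could drive the graph it builds (get_batch and BATCH_SIZE are placeholders, not from the source):

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        batch_x, batch_y = get_batch(BATCH_SIZE)  # hypothetical data source
        _, acc = sess.run([train_step, accuracy],
                          feed_dict={x: batch_x, y_: batch_y, keep_prob: 0.5})
        if step % 100 == 0:
            print('step %d, batch accuracy %.3f' % (step, acc))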
Example #11
import matplotlib.image as mpimg
import tensorflow as tf
import numpy as np
import utils
import vgg19_trainable as vgg19
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
sess = tf.Session()
batch_size = 300

images = tf.placeholder(tf.float32, [None, 28, 28, 1])
true_out = tf.placeholder(tf.float32, [None, 10])
train_mode = tf.placeholder(tf.bool)

# note: the stock Vgg19 assumes 224x224x3 inputs (fc6 reshapes pool5 into a
# 25088-wide vector), so raw 28x28x1 MNIST batches would need resizing and
# channel-tiling before they could actually run through this graph
vgg = vgg19.Vgg19()
vgg.build(images, train_mode)

print(vgg.get_var_count())

# test classification
sess.run(tf.global_variables_initializer())

batch = mnist.train.next_batch(batch_size)
batch_img = batch[0].reshape((-1, 28, 28, 1))
batch_lbl = batch[1]

print(batch_img.shape, batch_lbl.shape)

print(np.argmax(batch_lbl[0]))
print(np.argmax(batch_lbl[1]))
# Network start
def weight_variable(shape):
  initial = tf.truncated_normal(shape, stddev=0.001)
  return tf.Variable(initial)

def bias_variable(shape):
  initial = tf.constant(0.1, shape=shape)
  return tf.Variable(initial)


Xp = tf.placeholder(tf.float32, [None, 224, 224, 3])
Yp = tf.placeholder(tf.float32, [None, 2])
train_mode = tf.placeholder(tf.bool)

vgg = vgg19.Vgg19('vgg19.npy')
vgg.build(Xp, train_mode)


Wdense18 = weight_variable([4096, 4096])
Bdense18 = bias_variable([4096])
dense18 = tf.nn.relu(tf.matmul(vgg.fc6, Wdense18) + Bdense18)

Wdense19 = weight_variable([4096, 1024])
Bdense19 = bias_variable([1024])
dense19 = tf.nn.relu(tf.matmul(dense18, Wdense19) + Bdense19)

Wdense20 = weight_variable([1024, 2])
Bdense20 = bias_variable([2])
OUT = tf.nn.softmax(tf.matmul(dense19, Wdense20) + Bdense20)
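No loss is defined for this two-class head in the snippet; a hypothetical cross-entropy objective in the same clipped-log style the GAN examples below use:

loss = -tf.reduce_sum(Yp * tf.log(tf.clip_by_value(OUT, 1e-10, 1.0)))
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)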
    varlist = [
        Wconv1, Bconv1, Wconv2, Bconv2, Wconv3, Bconv3, Wconv4, Bconv4, Wconv5,
        Bconv5, Wconv6, Bconv6, Wconv7, Bconv7, Wconv8, Bconv8, Wfc1, Bfc1,
        Wfc2, Bfc2
    ]
    return Gout, Dout, varlist


Xp = tf.placeholder(tf.float32, shape=[None, None, None, 3])
Yp = tf.placeholder(tf.float32, shape=[None, None, None, 3])

GOUT, Gvarlist = Generator(Xp)
DOUT_G, DOUT_D, Dvarlist = Discriminator(GOUT, Yp)

train_mode = tf.placeholder(tf.bool)
vgg1 = vgg19.Vgg19('./vgg19.npy', False)
vgg2 = vgg19.Vgg19('./vgg19.npy', False)
vgg1.build(GOUT, train_mode)
vgg2.build(Yp, train_mode)

loss_mse = tf.reduce_mean(tf.reduce_sum(tf.square(Yp - GOUT)))
TrainStep_mse = tf.train.AdamOptimizer(0.0001).minimize(loss_mse)

loss_content = tf.reduce_mean(
    tf.reduce_sum(tf.square(vgg2.conv2_2 - vgg1.conv2_2)))
TrainStep_content = tf.train.AdamOptimizer(0.0001).minimize(loss_content)

loss_G = -tf.reduce_mean(tf.log(tf.clip_by_value(DOUT_G, 1e-10, 1.0)))
loss_LSR = 0.001 * loss_G + loss_mse
TrainStep_LSR = tf.train.AdamOptimizer(0.0001).minimize(loss_LSR,
                                                        var_list=Gvarlist)
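A hypothetical driver for the objectives above, with next_pair() standing in for whatever loader supplies (input, target) image batches in the full script:

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(10000):
        x_batch, y_batch = next_pair()  # assumed data source
        sess.run(TrainStep_mse, feed_dict={Xp: x_batch, Yp: y_batch})
        sess.run(TrainStep_content, feed_dict={Xp: x_batch, Yp: y_batch,
                                               train_mode: False})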
Example #14
def main(_):
    worker_hosts = FLAGS.worker_hosts.split(",")

    # create the cluster configured by `ps_hosts' and 'worker_hosts'
    cluster = tf.train.ClusterSpec({"worker": worker_hosts})

    # create a server for local task
    server = tf.train.Server(cluster,
                             job_name=FLAGS.job_name,
                             task_index=FLAGS.task_index)

    if FLAGS.job_name == "ps":
        server.join()
    elif FLAGS.job_name == "worker":
        with tf.device(
                tf.train.replica_device_setter(
                    worker_device="/job:worker/task:%d" % (FLAGS.task_index),
                    cluster=cluster)):
            img1 = utils.load_image("./test_data/tiger.jpeg")
            img1_true_result = [1 if i == 292 else 0
                                for i in range(1000)]  # 1-hot result for tiger
            batch1 = img1.reshape((1, 224, 224, 3))

            images = tf.placeholder(tf.float32, [1, 224, 224, 3])
            true_out = tf.placeholder(tf.float32, [1, 1000])
            train_mode = tf.placeholder(tf.bool)

            vgg = vgg19.Vgg19('./vgg19.npy')
            vgg.build(images, train_mode)

            print(vgg.get_var_count())

            # The StopAtStepHook handles stopping after running given steps.
            hooks = [tf.train.StopAtStepHook(last_step=10000)]

            global_step = tf.train.get_or_create_global_step()
            optimizer = tf.train.AdamOptimizer(learning_rate=1e-04)

            if FLAGS.is_sync:
                # synchronous training:
                # wrap the optimizer with tf.train.SyncReplicasOptimizer
                # ref: https://www.tensorflow.org/api_docs/python/tf/train/SyncReplicasOptimizer
                optimizer = tf.train.SyncReplicasOptimizer(
                    optimizer,
                    replicas_to_aggregate=FLAGS.num_workers,
                    total_num_replicas=FLAGS.num_workers)
                # create the hook which handles initialization and queues
                hooks.append(
                    optimizer.make_session_run_hook((FLAGS.task_index == 0)))

            loss = tf.reduce_sum((vgg.prob - true_out)**2)
            train_op = optimizer.minimize(
                loss,
                global_step=global_step,
                aggregation_method=tf.AggregationMethod.ADD_N)

            # The MonitoredTrainingSession takes care of session initialization,
            # restoring from a checkpoint, saving to a checkpoint, and closing when done
            # or an error occurs.
            with tf.train.MonitoredTrainingSession(
                    master=server.target,
                    is_chief=(FLAGS.task_index == 0),
                    checkpoint_dir="./checkpoint_dir",
                    hooks=hooks) as mon_sess:
                # mon_sess.run(tf.global_variables_initializer())
                while not mon_sess.should_stop():
                    _, prob, step = mon_sess.run(
                        [train_op, vgg.prob, global_step],
                        feed_dict={
                            images: batch1,
                            true_out: [img1_true_result],
                            train_mode: True
                        })
                    if step % 100 == 0:
                        print("Train step %d" % step)
                        utils.print_prob(prob[0], './synset.txt')
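The FLAGS used by main() are defined outside the snippet; they are presumably declared along these lines (the defaults here are illustrative):

import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_string("worker_hosts", "localhost:2222,localhost:2223",
                    "comma-separated list of worker host:port pairs")
flags.DEFINE_string("job_name", "worker", "either 'ps' or 'worker'")
flags.DEFINE_integer("task_index", 0, "index of the task within its job")
flags.DEFINE_integer("num_workers", 2, "replicas to aggregate when syncing")
flags.DEFINE_boolean("is_sync", False, "wrap the optimizer for sync training")
FLAGS = flags.FLAGS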
Example #15
    varlist = [
        Wconv1, Bconv1, Wconv2, Bconv2, Wconv3, Bconv3, Wconv4, Bconv4, Wconv5,
        Bconv5, Wconv6, Bconv6, Wconv7, Bconv7, Wconv8, Bconv8, Wfc1, Bfc1,
        Wfc2, Bfc2
    ]
    return Gout, Dout, varlist


Xp = tf.placeholder(tf.float32, shape=[None, None, None, 3])
Yp = tf.placeholder(tf.float32, shape=[None, None, None, 3])

GOUT, Gvarlist = Generator(Xp)
DOUT_G, DOUT_D, Dvarlist = Discriminator(GOUT, Yp)

train_mode = tf.placeholder(tf.bool)
vgg1 = vgg19.Vgg19('./vgg19.npy')
vgg2 = vgg19.Vgg19('./vgg19.npy')
vgg1.build(GOUT, train_mode)
vgg2.build(Yp, train_mode)

loss_mse = tf.reduce_mean(tf.reduce_sum(tf.square(vgg2.conv2_2 -
                                                  vgg1.conv2_2)))
TrainStep_mse = tf.train.AdamOptimizer(0.0001).minimize(loss_mse)

loss_G = -tf.reduce_mean(tf.log(tf.clip_by_value(DOUT_G, 1e-10, 1.0)))
loss_LSR = 0.001 * loss_G + 0.999 * loss_mse
TrainStep_LSR = tf.train.AdamOptimizer(0.0001).minimize(loss_LSR,
                                                        var_list=Gvarlist)

loss_D = -tf.reduce_mean(
    tf.log(tf.clip_by_value(DOUT_D, 1e-10, 1.0)) +
Example #16
        else:
            labels = np.append(labels, [[0, 0, 0, 0, 1]], axis=0)
            #label.resize(1,5)

        images_list.append(image)
labels = np.delete(labels, 0, 0)
print(labels.shape)

with tf.device('/gpu:0'):
    sess = tf.Session()

    images = tf.placeholder(tf.float32, [1, 224, 224, 3])
    true_out = tf.placeholder(tf.float32, [1, 1000])
    train_mode = tf.placeholder(tf.bool)

    vgg = vgg19.Vgg19('./vgg16_4.npy')
    vgg.build(images, train_mode)

    print(vgg.get_var_count())

    sess.run(tf.global_variables_initializer())
    correct = 0
    count = 0

    #load image
    dir_path = './train_resized_image'
    files = os.listdir(dir_path)

    for each in files:
        path = os.path.join(dir_path, each)
        img1 = utils.load_image(path)
Example #17
        for i in range(start, end):
            private_img_placeholder[i - start, :, :, :] = utils.load_image(
                private_imgs[i])
        return private_img_placeholder


with tf.device('/gpu:0'):
    sess = tf.Session()

    if not HAVE_LABEL:

        images = tf.placeholder(tf.float32, [None, 224, 224, 3])
        true_out = tf.placeholder(tf.float32, [None, 8])
        train_mode = tf.placeholder(tf.bool)

        vgg = vgg19.Vgg19(NPY_FILE, False)
        vgg.build(images, train_mode)
        for i in range(0, 200):
            test_img = parse_img(i * 10, (i + 1) * 10)
            test_prob = sess.run(vgg.prob,
                                 feed_dict={
                                     images: test_img,
                                     train_mode: False
                                 })
            #private_prob = sess.run(vgg.prob, feed_dict={images:private_img, train_mode:False})
            test_result = sess.run(tf.argmax(test_prob, 1))
            #private_result = sess.run(tf.argmax(private_prob, 1))

            print(test_result)
            #print(private_result)
Example #18
lookup = {'normals': 0, 'benigns': 1, 'cancers': 2}
imgfiles = [fl for fl in os.listdir(DATAPATH) if '.png' in fl]
for fii, fl in enumerate(imgfiles):
    rawlabel = fl.split('-')[-1].replace('.png', '')
    if rawlabel not in metadata: metadata[rawlabel] = []
    metadata[rawlabel].append('%s/%s' % (DATAPATH, fl))

with open('evaldata.json') as fl:
    dtest = json.load(fl)

sess = tf.Session()
images = tf.placeholder(tf.float32, [BATCHSIZE, IMSIZE, IMSIZE, CDIM])
true_out = tf.placeholder(tf.float32, [BATCHSIZE, NETSIZE])
train_mode = tf.placeholder(tf.bool)

vgg = vgg19.Vgg19('./checkpoint-cancer.npy')
vgg.build(images, train_mode, imsize=IMSIZE)

sess.run(tf.global_variables_initializer())
numbatches = int(len(dtest) / BATCHSIZE)
correct = 0
tally = 0
for bii in range(numbatches):
    batchinds = dtest[bii * BATCHSIZE:(bii + 1) * BATCHSIZE]
    if len(batchinds) < BATCHSIZE: continue
    batch = np.array([get_image(metadata[fl][ind]) for fl, ind in batchinds])
    labels = [[1.0 if lookup[fl] == ii else 0.0 for ii in range(NETSIZE)]
              for fl, _ in batchinds]
    prob = sess.run(vgg.prob, feed_dict={images: batch, train_mode: False})
    for ii, ent in enumerate(prob):
        if np.argmax(ent) == lookup[batchinds[ii][0]]:
Example #19
def main(_):
    tr.init()

    global_step = tf.Variable(0, name="global_step", trainable=False)
    with tf.name_scope('input'):
        sess = tf.Session()

        images = tf.placeholder(tf.float32, [batch_size, 224, 224, 3])
        true_out = tf.placeholder(tf.float32, [batch_size, 1000])
        train_mode = tf.placeholder(tf.bool)

        #vgg = vgg19.Vgg19('./vgg19.npy')
        vgg = vgg19.Vgg19()
        vgg.build(images, train_mode)

        # print number of variables used: 143667240 variables, i.e. ideal size = 548MB
        print(vgg.get_var_count())

        sess.run(tf.global_variables_initializer())

        batch, actuals = get_next_batch(batch_size)
        # test classification
        prob = sess.run(vgg.prob, feed_dict={images: batch, train_mode: False})
        #utils.print_prob(prob[0], './synset.txt')

    print("ok5")
    with tf.name_scope('train'):
        loss = tf.reduce_sum((vgg.prob - true_out)**2)
        optimizer = tf.train.GradientDescentOptimizer(0.0001)
        optimizer = tr.DistributedOptimizer(optimizer)
        train_step = optimizer.minimize(loss, global_step=global_step)

    print("ok7")

    #add our code
    hooks = [
        tr.BroadcastGlobalVariablesHook(0),
        tf.train.StopAtStepHook(last_step=10000),
        tf.train.LoggingTensorHook(tensors={
            'step': global_step,
            'loss': loss
        },
                                   every_n_iter=1),
    ]
    # Pin GPU to be used to process local rank (one GPU per process)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    #config.gpu_options.visible_device_list = str(bq.local_rank())

    # Save checkpoints only on worker 0 to prevent other workers from corrupting them.
    #checkpoint_dir = './checkpoints' if tr.rank() == 0 else None
    checkpoint_dir = None
    # The MonitoredTrainingSession takes care of session initialization,
    # restoring from a checkpoint, saving to a checkpoint, and closing when done
    # or an error occurs.
    train_images = []
    train_labels = []
    print("ok 9")
    cnt = 0
    start_t = 0
    end_t = 0
    with tf.train.MonitoredTrainingSession(checkpoint_dir=checkpoint_dir,
                                           hooks=hooks,
                                           config=config) as mon_sess:
        start_t = time.time()
        while not mon_sess.should_stop():
            #for i in range(10000):
            # Run a training step synchronously.
            #print("start")
            #print(cnt)

            batch, actuals = get_next_batch(batch_size)

            mon_sess.run(train_step,
                         feed_dict={
                             images: batch,
                             true_out: actuals,
                             train_mode: True
                         })
            cnt = cnt + 1
            if (cnt % 100 == 0):
                end_t = time.time()
                inter_val = end_t - start_t
                start_t = end_t
                print("cnt = %d interval = %d" % (cnt, inter_val))
            #print("FIN")
            #print(cnt)
            # test classification again, should have a higher probability about tiger
            #prob = mon_sess.run(vgg.prob, feed_dict={images: batch1, train_mode: False})
        print("DONE")
        print(cnt)
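get_next_batch is not shown; a stand-in that produces random data with the shapes the placeholders above expect, handy for benchmarking the pipeline:

import numpy as np

def get_next_batch(batch_size):
    images = np.random.rand(batch_size, 224, 224, 3).astype(np.float32)
    labels = np.zeros((batch_size, 1000), dtype=np.float32)
    labels[np.arange(batch_size), np.random.randint(0, 1000, batch_size)] = 1.0
    return images, labels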
Example #20
        if fl not in lookup: lookup[fl] = fii
        metadata[fl] = ['%s/%s/%s' % (DATAPATH, fl, img)
                        for img in os.listdir('%s/%s' % (DATAPATH, fl))
                        if '.jpg' in img]
        #for ii in range(len(metadata[fl])):
        #       datainds.append((fl, ii))
        #print(fl, len(metadata[fl]))

with open('evaldata.json') as fl:
    dtest = json.load(fl)


sess = tf.Session()
images = tf.placeholder(tf.float32, [BATCHSIZE, IMSIZE, IMSIZE, CDIM])
true_out = tf.placeholder(tf.float32, [BATCHSIZE, NETSIZE])
train_mode = tf.placeholder(tf.bool)

vgg = vgg19.Vgg19('./checkpoint.npy')
vgg.build(images, train_mode)

# print number of variables used: 143667240 variables, i.e. ideal size = 548MB
print('Trainable vars:', vgg.get_var_count())

sess.run(tf.global_variables_initializer())
cost = tf.reduce_sum((vgg.prob - true_out) ** 2)
train = tf.train.GradientDescentOptimizer(0.0001).minimize(cost)
numbatches = int(len(dtest) / BATCHSIZE)
correct = 0
tally = 0
for bii in range(numbatches):
    batchinds = dtest[bii * BATCHSIZE:(bii + 1) * BATCHSIZE]
    if len(batchinds) < BATCHSIZE: continue
    batch = np.array([get_image(metadata[fl][ind]) for fl, ind in batchinds])
from VggNet import utils

img1 = utils.load_image("./test_data/tiger.jpeg")
img1_true_result = [1 if i == 292 else 0
                    for i in range(1000)]  # 1-hot result for tiger

batch1 = img1.reshape((1, 224, 224, 3))

with tf.device('/cpu:0'):
    sess = tf.Session()

    images = tf.placeholder(tf.float32, [1, 224, 224, 3])
    true_out = tf.placeholder(tf.float32, [1, 1000])
    train_mode = tf.placeholder(tf.bool)

    vgg = vgg19.Vgg19('./vgg19.npy')
    vgg.build(images, train_mode)

    # print number of variables used: 143667240 variables, i.e. ideal size = 548MB
    print(vgg.get_var_count())

    sess.run(tf.global_variables_initializer())

    # test classification
    prob = sess.run(vgg.prob, feed_dict={images: batch1, train_mode: False})
    utils.print_prob(prob[0], './synset.txt')

    # simple 1-step training
    cost = tf.reduce_sum((vgg.prob - true_out)**2)
    train = tf.train.GradientDescentOptimizer(0.0001).minimize(cost)
    sess.run(train,
Example #22
print('csv loading finish')

batch_size = 32
STEPS = 50000

#batch1,label1=get_image(batch_size, './train', images_list, labels)

with tf.device('/gpu:0'):
    sess = tf.Session()

    images = tf.placeholder(tf.float32, shape=(batch_size, 224, 224, 3))
    true_out = tf.placeholder(tf.float32, shape=(batch_size, 5))
    # train_mode is pinned to a constant here; the placeholder the other
    # examples use would go unused, so it is dropped
    train_mode = tf.constant(True, dtype=tf.bool)

    vgg = vgg19.Vgg19('./vgg16_1.npy')
    vgg.build(images, train_mode)

    print('database prepared')

    sess.run(tf.global_variables_initializer())
    # simple 1-step training
    cost = tf.reduce_sum((vgg.prob - true_out)**2)
    train = tf.train.GradientDescentOptimizer(0.0000001).minimize(cost)
    for i in range(STEPS):
        batch_train, label_train = get_image(batch_size,
                                             './train_resized_image',
                                             images_list, labels)
        if i % 500 == 0:
            total_cost = sess.run(cost,