Example #1
			toreturn.append(data5)
	toreturn = ConvertTo3DVolume(toreturn)
	proba = model.predict(toreturn)
	return proba

img_rows, img_cols = 48, 48
nb_classes = 7
# model = model_generate()
model = big_XCEPTION((img_rows,img_cols,1), nb_classes)
model.compile(loss='categorical_crossentropy',
				  optimizer='adam',
				  metrics=['accuracy'])
filepath='Model.110-0.6810.hdf5'
model.load_weights(filepath)
model.summary()
test_set_x, test_set_y = dataprocessing.load_test_data()

proba = predict_prob(0,test_set_x,model)
proba1 = predict_prob(1,test_set_x,model)
proba2 = predict_prob(2,test_set_x,model)
proba3 = predict_prob(3,test_set_x,model)
proba4 = predict_prob(4,test_set_x,model)
proba5 = predict_prob(5,test_set_x,model)
proba6 = predict_prob(6,test_set_x,model)
proba7 = predict_prob(7,test_set_x,model)
Out = []
for row in zip(proba,proba1,proba2,proba3,proba4,proba5,proba6,proba7):
	a = np.argmax(np.array(row).mean(axis=0))
	Out.append(a)

Out = np.array(Out)
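
# The eight probability arrays above are averaged per sample and the argmax of
# the mean gives the ensemble prediction. A minimal, hypothetical sketch for
# scoring that output, assuming test_set_y holds one integer class label per
# test sample (not part of the original example):
accuracy = np.mean(Out == np.asarray(test_set_y))
print('Ensemble accuracy: %.4f' % accuracy)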
Example #2
def train():
    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)

    ########################## Load Image ######################

    with tf.device('/cpu:0'):

        imgs_train, imgs_depth_train = load_train_data()
        imgs_train = imgs_train.astype('float32')

        imgs_depth_train = imgs_depth_train.astype('float32')
        imgs_depth_train = np.expand_dims(np.asarray(imgs_depth_train),
                                          axis=-1)

        imgs_test, imgs_id_test, imgs_depth_test = load_test_data()
        imgs_test = imgs_test.astype('float32')

        imgs_depth_test = imgs_depth_test.astype('float32')
        imgs_depth_test = np.expand_dims(np.asarray(imgs_depth_test), axis=-1)

    ########################## setup net / placeholders / fine-tune layers ######################

    images = tf.placeholder(tf.float32, shape=(None, height, width, channels))
    depths = tf.placeholder(tf.float32, [None, depth_height, depth_width, 1])

    net = Network({'data': images}, batch_size, 1, is_training=True
                  )  # inputs, batch, keep_prob, is_training, trainable = True

    fine_tuning_layers = ['res4a', 'res4b', 'res4c', 'res4d', 'res4e', 'res4f']
    tuning_params = []
    global_step = tf.Variable(0, name='global_step', trainable=False)
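    # Collect only the variables that belong to the fine-tuning layers; the
    # optimizer below updates just these and leaves the rest of the network frozen.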

    for W in tf.trainable_variables():
        if "batch_normalization" not in W.name:
            print(W.name)
        for layer in fine_tuning_layers:
            if layer in W.name:
                print('tune')
                tuning_params.append(W)
                break

    ########################## Loss / optimizer ######################
    with tf.name_scope('loss'):
        loss = tf_mse_loss(depths, net.get_output())
        tf.summary.scalar('mse_loss', loss)

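    # Exponential decay schedule: the fine-tuning learning rate starts at 1e-5
    # and is multiplied by 0.1 every 5000 global steps.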
    lr_tune = tf.train.exponential_decay(1e-5,
                                         global_step,
                                         5000,
                                         0.1,
                                         staircase=True)

    with tf.name_scope('train'):
        train_op = tf.train.AdamOptimizer(learning_rate=lr_tune).minimize(
            loss, global_step=global_step, var_list=tuning_params)

    ########################## RunOptions / GPU option ######################
    #run_options = tf.RunOptions(report_tensor_allocations_upon_oom = True)

    # Assume that you have 8GB of GPU memory and want to allocate ~2.6GB:
    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = 0.333)

    ########################## Session ######################
    with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:

        #sess.run(op, feed_dict = fdict, options = run_options)
        sess.run(tf.global_variables_initializer())

        learnable_params = tf.trainable_variables()
        # define savers
        saver_learnable = tf.train.Saver(learnable_params, max_to_keep=4)
        print('Saver....')

        # log
        merged = tf.summary.merge_all()
        writer_train = tf.summary.FileWriter(logs_path_train, graph=sess.graph)
        writer_test = tf.summary.FileWriter(logs_path_test, graph=sess.graph)

        # Load the converted parameters
        print('Loading the model...')

        _ckpt = tf.train.get_checkpoint_state(CKPTDIR)

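        # import_meta_graph rebuilds the saved graph definition and returns a
        # Saver; restore() then loads the matching checkpoint weights into it.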
        if _ckpt and _ckpt.model_checkpoint_path:
            # Use to load from ckpt file#
            saver = tf.train.import_meta_graph(meta_path_restore)
            print("Meta graph restored successfully!")
            print('-' * 30)

            saver.restore(sess, _ckpt.model_checkpoint_path)
            print("Weights restored successfully!")
            print('-' * 30)

        #learnable_params = tf.trainable_variables()

        # initialize the queue threads to start to shovel data
        print('Start Coord and Threads')
        print('-' * 30)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        print('-' * 30)

        for epoch in range(EPOCHS):
            for i in range(1000):
                step = epoch * 1000 + i

                start_time = time.time()

                _, loss_value, out_depth, train_summ = sess.run(
                    [train_op, loss, net.get_output(), merged],
                    feed_dict={
                        images: imgs_train,
                        depths: imgs_depth_train
                    })  # options = run_options
                writer_train.add_summary(train_summ, step)  # train log
                #print(net.get_output())

                duration = time.time() - start_time

                if i % 10 == 0:
                    # To log validation accuracy.
                    validation_loss, pred, valid_summ = sess.run(
                        [loss, net.get_output(), merged],
                        feed_dict={
                            images: imgs_test,
                            depths: imgs_depth_test
                        })
                    writer_test.add_summary(valid_summ, step)

                    print('sec_per_batch')
                    sec_per_batch = float(duration)
                    print(
                        "%s: %d[epoch]: %d[iteration]: train loss = %.4f : valid loss = %.4f : %.3f [sec/batch]"
                        % (datetime.now(), epoch, i, loss_value,
                           validation_loss, sec_per_batch))

        saver_learnable.save(sess, weights_checkpoint_path)

        # stop queue threads and properly close the session
        coord.request_stop()
        coord.join(threads)
        sess.close()
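
# A minimal entry point for running this example; assumes EPOCHS, CKPTDIR, the
# summary/checkpoint paths and the Network class are defined at module level
# elsewhere in the file (hypothetical, not part of the original example).
if __name__ == '__main__':
    train()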