Example 1
import collections

import tensorflow as tf
from keras import backend as K
from keras import models
from tensorflow.python.framework import graph_util


def constantize(fname):
    """Load a saved Keras model and freeze it into a constant GraphDef."""
    K.clear_session()
    tf.reset_default_graph()
    K.set_learning_phase(False)
    mod = models.load_model(fname)
    outputs = mod.output
    if not isinstance(outputs, collections.Sequence):
        outputs = [outputs]
    # Tensor names look like "dense_1/Softmax:0"; keep only the op name.
    output_names = []
    for output in outputs:
        output_names.append(output.name.split(':')[0])
    sess = K.get_session()
    # Fold all variables into constants so the resulting graph is self-contained.
    cg = graph_util.convert_variables_to_constants(
        sess, sess.graph.as_graph_def(add_shapes=True), output_names)
    K.clear_session()
    return cg
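A minimal usage sketch for the function above (the file and directory names are hypothetical, not from the original): freeze a saved model and write the constant GraphDef to disk with tf.train.write_graph.

frozen_graph_def = constantize("model.h5")  # hypothetical HDF5 file saved with model.save()
tf.train.write_graph(frozen_graph_def, "export", "frozen_model.pb", as_text=False)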
Example 2
	def onBeginTraining(self):
		ue.log("starting mnist keras cnn training")

		model_file_name = "mnistKerasCNN"
		model_directory = ue.get_content_dir() + "/Scripts/"
		model_sess_path = model_directory + model_file_name + ".tfsess"
		model_json_path = model_directory + model_file_name + ".json"

		my_file = Path(model_json_path)

		#reset the session each time we get training calls
		K.clear_session()

		#let's train
		batch_size = 128
		num_classes = 10
		epochs = 8

		# input image dimensions
		img_rows, img_cols = 28, 28

		# the data, shuffled and split between train and test sets
		(x_train, y_train), (x_test, y_test) = mnist.load_data()

		if K.image_data_format() == 'channels_first':
			x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
			x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
			input_shape = (1, img_rows, img_cols)
		else:
			x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
			x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
			input_shape = (img_rows, img_cols, 1)

		x_train = x_train.astype('float32')
		x_test = x_test.astype('float32')
		x_train /= 255
		x_test /= 255
		ue.log('x_train shape: ' + str(x_train.shape))
		ue.log(str(x_train.shape[0]) + ' train samples')
		ue.log(str(x_test.shape[0]) + ' test samples')

		# convert class vectors to binary class matrices
		y_train = keras.utils.to_categorical(y_train, num_classes)
		y_test = keras.utils.to_categorical(y_test, num_classes)

		model = Sequential()
		model.add(Conv2D(64, kernel_size=(3, 3),
						  activation='relu',
						  input_shape=input_shape))
		
		# model.add(Dropout(0.2))
		# model.add(Flatten())
		# model.add(Dense(512, activation='relu'))
		# model.add(Dropout(0.2))
		# model.add(Dense(num_classes, activation='softmax'))

		#model.add(Conv2D(64, (3, 3), activation='relu'))
		model.add(MaxPooling2D(pool_size=(2, 2)))
		model.add(Dropout(0.25))
		model.add(Flatten())
		model.add(Dense(128, activation='relu'))
		model.add(Dropout(0.5))
		model.add(Dense(num_classes, activation='softmax'))

		model.compile(loss=keras.losses.categorical_crossentropy,
					  optimizer=keras.optimizers.Adadelta(),
					  metrics=['accuracy'])

		model.fit(x_train, y_train,
				  batch_size=batch_size,
				  epochs=epochs,
				  verbose=1,
				  validation_data=(x_test, y_test),
				  callbacks=[self.stopcallback])
		score = model.evaluate(x_test, y_test, verbose=0)
		ue.log("mnist keras cnn training complete.")
		ue.log('Test loss: ' + str(score[0]))
		ue.log('Test accuracy: ' + str(score[1]))

		self.session = K.get_session()
		self.model = model

		stored = {'model':model, 'session': self.session}

		#run a test evaluation
		ue.log(str(x_test.shape))
		result_test = model.predict(np.reshape(x_test[500], (1, 28, 28, 1)))
		ue.log(str(result_test))

		#flush the architecture model data to disk
		#with open(model_json_path, "w") as json_file:
		#	json_file.write(model.to_json())

		#flush the whole model and weights to disk
		#saver = tf.train.Saver()
		#save_path = saver.save(K.get_session(), model_sess_path)
		#model.save(model_path)

		
		return stored
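The dictionary returned above keeps the trained model together with its TensorFlow session, so a later callback can run inference against the same graph. A minimal sketch under that assumption (predict_digit and its arguments are hypothetical, not part of the plugin API):

def predict_digit(stored, image_28x28):
	# image_28x28: float32 array of shape (28, 28), already scaled to [0, 1]
	with stored['session'].graph.as_default():
		with stored['session'].as_default():
			batch = np.reshape(image_28x28, (1, 28, 28, 1))
			probabilities = stored['model'].predict(batch)
	return int(np.argmax(probabilities))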
Example 3
 def close(self):
     backend.clear_session()
Example 4
                         workers=12,
                         callbacks=[history])
 
 
# Save model
model_backup_path = os.path.join(r'G:\study\machine learning\competition\analytics Vidya Computer Vision\model3.h5')
classifier.save(model_backup_path)
print("Model saved to", model_backup_path)
 
# Save loss history to file
loss_history_path = os.path.join(r'G:\study\machine learning\competition\analytics Vidya Computer Vision\loss_history.log')
with open(loss_history_path, 'w+') as myFile:
    myFile.write(str(history.losses))  # losses is a list; convert to text before writing
 
backend.clear_session()
print("The model class indices are:", training_set.class_indices)

import keras
import tensorflow as tf
from keras.models import load_model
from keras.preprocessing import image
import numpy as np
import os
from PIL import Image
images = []
path1=r"G:\study\machine learning\competition\analytics Vidya Computer Vision\test_real\\"
for img in os.listdir(r"G:\study\machine learning\competition\analytics Vidya Computer Vision\test_real"):
    img = image.load_img(path1+img, target_size=(192, 192))
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    images.append(img)  # collect each preprocessed array (the original snippet is cut off here)
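The snippet ends before the collected arrays are used. A hedged sketch of a plausible continuation, reusing model_backup_path and training_set.class_indices from the training script above (this continuation is an assumption, not part of the original):

classifier = load_model(model_backup_path)   # reload the model saved after training
batch = np.vstack(images)                    # shape: (n_images, 192, 192, 3)
predictions = classifier.predict(batch)
print(np.argmax(predictions, axis=1))        # map indices to labels via training_set.class_indices
keras.backend.clear_session()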
Example 5
 def close(self):
     backend.clear_session()
Example 6
        self.grad_values = None
        return grad_values

evaluator = Evaluator()

x = np.random.uniform(0, 255, (1, height, width, 3)) - 128.


iterations = 200
for i in range(iterations):
    print('Start of iteration', i)
    start_time = time.time()
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(),
                                     fprime=evaluator.grads, maxfun=20)
    print('Current loss value:', min_val)
    end_time = time.time()
    print('Iteration %d completed in %ds' % (i, end_time - start_time))


K.clear_session()  # release the TF session; otherwise a session error occurs afterwards

x = x.reshape((height, width, 3))
x = x[:, :, ::-1]                       # reverse the channel order
x[:, :, 0] += 103.939                   # add back the VGG ImageNet channel means
x[:, :, 1] += 116.779
x[:, :, 2] += 123.68
x = np.clip(x, 0, 255).astype('uint8')  # back to displayable 8-bit pixels
print("Image generated from {0} and {1}".format(content_image_path, style_image_path))
img = Image.fromarray(x)
img.save("out.jpg")
Example 7
                dprint('Loss Classifier classifier: {}'.format(
                    (loss_class_cls)))
                dprint('Loss Classifier regression: {}'.format(
                    (loss_class_regr)))
                dprint('Elapsed time: {}'.format(time.time() - start_time))
            else:
                dprint(
                    'loss_rpn_cls,{},loss_rpn_regr,{},loss_class_cls,{},loss_class_regr,{},class_acc,{},elapsed_time,{}'
                    .format(loss_rpn_cls, loss_rpn_regr, loss_class_cls,
                            loss_class_regr, class_acc,
                            time.time() - start_time))
            curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr
            iter_num = 0
            start_time = time.time()
            epoch_num += 1
            if epoch_num == 1 or curr_loss < best_loss:
                if C.verbose:
                    dprint(
                        'Total loss decreased from {} to {}, saving weights'.
                        format(best_loss, curr_loss))
                best_loss = curr_loss
                model_all.save_weights(C.model_path)
        if epoch_num == num_epochs:
            dprint('Training complete, exiting.')
            sys.exit()
    except Exception as e:
        dprint('Exception: %s' % (e))
        continue

K.clear_session()