Example #1
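# Sanity check: train the CNN on one fixed batch of 8 samples for 1000 epochs
# at learning rate 0.01; loss should fall and accuracy should climb toward 1.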
from data_load_utils import *
from data_loader import *
from NN.CNN import *

loader = data_loader('train_samples.pkl', from_chunk=False)

X, Y = loader.load_samples(num_elements=8)

model = CNN(batch_size=8)

for i in range(1000):
    loss, acc = model.train_on_batch(X, Y, 0.01)
    if (i + 1) % 10 == 0:  # report progress every 10 epochs
        print "epoch %d, accuracy %.4f, loss %.4f" % (i + 1, acc, loss)
Example #2
        # 	model.save_weights('model_weights_cnn_'+str(epoch_done+i+1)+'_'+str(curr_avg_acc)+'.h5',overwrite=True)
    return model


if __name__ == '__main__':
    assert len(argv) >= 3  # expects train and test sample file paths
    print "reading arguments"
    train_samples_file = argv[1]
    test_samples_file = argv[2]
    print "Initializing data loading"
    #loader_train1 = data_loader(train_samples_file, from_chunk = False)
    #loader_train2 = data_loader(train_samples_file, from_chunk = False)
    loader_train = data_loader_new(train_samples_file)
    test_loader = data_loader_new(test_samples_file)
    print "Creating CNN architecture....."
    model = CNN(batch_size=8)
    # model_arch_json = model.to_json()
    # pickle.dump(model_arch_json,open('model_cnn_more_droput.json.pkl','wb'))
    print "CNN architechture created"
    print "Starting Training..."
    #num_evaluate = 10
    #for i in range(num_evaluate):
    #	model = train_model_with_parallel_loading(model,loader,num_epoch=2)
    #	write_to_file("Evaluating model performance\n")
    #	model = evaluate_model_with_parallel_loading(model,test_loader,num_epoch=1)
    #model = train_model(model,loader)

    model = train_model_with_parallel_loading(model,
                                              loader_train,
                                              num_epoch=300,
                                              test_loader=test_loader,
Example #3
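    # The test split is loaded once without augmentation; training batches are
    # drawn from the q_train queue (filled outside this excerpt).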
    # if os.fork() == 0:
    # 	while True:
    # 		if q_test.qsize() < 5:
    # 			Xt , Yt = test_loader.load_samples(num_elements=250)
    # 			#print "Loaded batch"
    # 			q_test.put([Xt,Yt])
    #X,Y = loader_train.load_samples(num_elements = samples_train_count)
    X_test, Y_test = test_loader.load_samples(num_elements=samples_test_count,
                                              transform=False)

    #datagen = ImageDataGenerator(featurewise_std_normalization=True,featurewise_center=True,rotation_range=20,width_shift_range=0.2,height_shift_range=0.2, horizontal_flip=True)
    #datagen.fit(X)
    #test_data_gen = ImageDataGenerator(featurewise_std_normalization=True,featurewise_center=True)
    #test_data_gen.fit(X)
    print "Creating CNN architecture....."
    model = CNN(batch_size=32)
    # model_arch_json = model.to_json()
    # pickle.dump(model_arch_json,open('model_cnn_more_droput.json.pkl','wb'))
    print "CNN architechture created"
    print "Starting Training..."
    #num_evaluate = 10
    #for i in range(num_evaluate):
    #	model = train_model_with_parallel_loading(model,loader,num_epoch=2)
    #	write_to_file("Evaluating model performance\n")
    #	model = evaluate_model_with_parallel_loading(model,test_loader,num_epoch=1)
    #model = train_model(model,loader)

    model = train_model_with_parallel_loading(
        model,
        q_train,
        X_test,
Example #4
from NN.CNN import *
from pre_trained_embedd import *

from sys import argv

# Load a trained CNN from the weights file given as the first argument.
network = CNN(batch_size=5)

network.load_model_params_dumb(argv[1])

# Build the embedder over the remaining command-line arguments (image files).
embedder = image_embedder(img_files_list=argv[2:], network=network)
Example #6
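	# Load the test split once (no augmentation), then drop the loader
	# reference so its memory can be reclaimed before training starts.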
	# if os.fork() == 0:
	# 	while True:
	# 		if q_test.qsize() < 5:
	# 			Xt , Yt = test_loader.load_samples(num_elements=250)
	# 			#print "Loaded batch"
	# 			q_test.put([Xt,Yt])
	#X,Y = loader_train.load_samples(num_elements = samples_train_count)
	X_test, Y_test = test_loader.load_samples(num_elements=samples_test_count,transform=False)
	test_loader = None
	
	#datagen = ImageDataGenerator(featurewise_std_normalization=True,featurewise_center=True,rotation_range=20,width_shift_range=0.2,height_shift_range=0.2, horizontal_flip=True)
	#datagen.fit(X)
	#test_data_gen = ImageDataGenerator(featurewise_std_normalization=True,featurewise_center=True)
	#test_data_gen.fit(X)
	print "Creating CNN architecture....."
	model = CNN(batch_size=16)
	if len(argv) >= 4:
		model.load_model_params_dumb(argv[3]) 
	# model_arch_json = model.to_json()
	# pickle.dump(model_arch_json,open('model_cnn_more_droput.json.pkl','wb'))
	print "CNN architechture created"
	print "Starting Training..."
	#num_evaluate = 10
	#for i in range(num_evaluate):
	#	model = train_model_with_parallel_loading(model,loader,num_epoch=2)
	#	write_to_file("Evaluating model performance\n")
	#	model = evaluate_model_with_parallel_loading(model,test_loader,num_epoch=1)
	#model = train_model(model,loader)

	model = train_model_with_parallel_loading(
		model, q_train, X_test, Y_test,
		batch_size=16, num_epoch=1000,
		samples_train_count=samples_train_count,
		samples_test_count=samples_test_count,
		datagen=None, test_data_gen=None)