"""Build an image embedder from a previously trained CNN.

Usage: <script> <model_params_file> <image_file> [<image_file> ...]
  argv[1]  - dumped model-parameter file to restore the network from
  argv[2:] - image files to embed
"""
from sys import argv

from NN.CNN import *
from pre_trained_embedd import *

# Restore the trained network; batch_size=5 is the inference batch size
# the embedder feeds through the net.
network = CNN(batch_size=5)
network.load_model_params_dumb(argv[1])

# argv[2:] (the original's `argv[2::]` was a redundant full-step slice):
# every remaining CLI argument is an image path to embed.
embedder = image_embedder(img_files_list=argv[2:], network=network)
# if q_test.qsize() < 5: # Xt , Yt = test_loader.load_samples(num_elements=250) # #print "Loaded batch" # q_test.put([Xt,Yt]) #X,Y = loader_train.load_samples(num_elements = samples_train_count) X_test, Y_test = test_loader.load_samples(num_elements=samples_test_count,transform=False) test_loader = None #datagen = ImageDataGenerator(featurewise_std_normalization=True,featurewise_center=True,rotation_range=20,width_shift_range=0.2,height_shift_range=0.2, horizontal_flip=True) #datagen.fit(X) #test_data_gen = ImageDataGenerator(featurewise_std_normalization=True,featurewise_center=True) #test_data_gen.fit(X) print "Creating CNN architecture....." model = CNN(batch_size=16) if len(argv) >= 4: model.load_model_params_dumb(argv[3]) # model_arch_json = model.to_json() # pickle.dump(model_arch_json,open('model_cnn_more_droput.json.pkl','wb')) print "CNN architechture created" print "Starting Training..." #num_evaluate = 10 #for i in range(num_evaluate): # model = train_model_with_parallel_loading(model,loader,num_epoch=2) # write_to_file("Evaluating model performance\n") # model = evaluate_model_with_parallel_loading(model,test_loader,num_epoch=1) #model = train_model(model,loader) model = train_model_with_parallel_loading(model,q_train, X_test, Y_test, batch_size = 16, num_epoch=1000, samples_train_count=samples_train_count, samples_test_count=samples_test_count, datagen=None, test_data_gen=None)