Exemple #1
0
	def train(self):
		"""Launch the training stage selected by the parent widget's signal.

		Builds the matching TrainModel and runs its entry point on a
		background thread so the GUI stays responsive. Signals: 1 = feature
		extraction, 2 = top-model training, 3 = prediction; anything else
		is ignored.
		"""
		signal = self.parent.signal
		if signal == 1:
			model = train.TrainModel(self.train_data_dir, self.test_data_dir,
			                         '1', '1', '1',
			                         self.m_richText1, self.m_button1)
			target = model.save_bottlebeck_features
		elif signal == 2:
			model = train.TrainModel(self.train_data_dir, self.test_data_dir,
			                         self.optimizerType, self.learnRate,
			                         self.epoch_num,
			                         self.m_richText1, self.m_button1)
			target = model.train_top_model
		elif signal == 3:
			model = train.TrainModel('1', '1', '1', '1', '1',
			                         self.m_richText1, self.m_button1)
			target = model.predict
		else:
			# no matching signal: do nothing, same as the original fall-through
			return
		thread.start_new_thread(target, ())
Exemple #2
0
def main():
    """Ensure the output directory exists, then dispatch on flag.mode.

    'train' runs a full training pass; 'predict_img' predicts a single
    image; every other mode (including 'predict_imgDir') just reports
    that it is not supported.
    """
    if not os.path.isdir(flag.output_dir):
        os.mkdir(flag.output_dir)

    mode = flag.mode
    if mode == 'train':
        train.TrainModel(flag).train()
    elif mode == 'predict_img':
        predict_image(flag)
    else:
        # 'predict_imgDir' and any unknown mode print the same message
        print ('not supported')
Exemple #3
0
def train_and_tune(train_data, valid_data):
	"""Grid-search learning rate x trainable-conv-layer count and keep the best model.

	For every (lr, conv) combination a fresh ResNet50 base is loaded, a
	model is trained via train.TrainModel, and the error count on the
	validation generator is logged. The model with the fewest validation
	errors is saved via saveModel and its hyperparameters written to
	hyperparameter.txt.

	Fix over the original: every log/results line was written with
	`print(..., file=open(path, mode))`, which leaks an open file handle
	per call and leaves flushing to interpreter shutdown. All writes now
	go through a context-managed helper so handles are closed immediately.

	:param train_data: path to the training image directory (its name may
		contain 'Under_90'/'Under_10' to select a shorter epoch budget).
	:param valid_data: path to the validation image directory.
	"""
	learn_rate = [0.010, 0.025, 0.050]
	train_conv_layer = [10, 15, 20, 25]

	log_file_path = os.path.join(os.getcwd(), 'logs')
	best_models_path = os.path.join(os.getcwd(), 'models', 'best_models')

	make_dirs(log_file_path)
	make_dirs(best_models_path)

	img_width, img_height = 224, 224
	num_channels = 3
	num_classes = 7
	batch_size = 32
	patience = 50

	# Smaller data subsets get more epochs to compensate.
	epoch = 3
	if 'Under_90' in train_data:
		epoch = 10
	if 'Under_10' in train_data:
		epoch = 5

	tune_log = os.path.join(log_file_path, "Tune.log")

	def _write_line(text, path=tune_log, mode="a"):
		# Context-managed write: closes the handle right away instead of
		# leaking one descriptor per logged line.
		with open(path, mode) as fh:
			print(text, file=fh)

	valid_data_gen = ImageDataGenerator(rescale=1.0 / 255)
	valid_generator = valid_data_gen.flow_from_directory(
		valid_data, (img_width, img_height),
		batch_size=batch_size, class_mode='categorical')
	fnames = valid_generator.filenames
	ground_truth = valid_generator.classes

	# Loaded for its import-time side effects; the ResNet152 variant itself
	# is currently disabled in favour of ResNet50 below.
	resnet152 = module_from_file("resnet152_keras", "resnet152_keras.py")

	cur_best = None
	cur_error = np.inf
	best_lr = -1
	best_conv = -1

	# Truncate both output files at the start of the search.
	_write_line("learning_rate,train_conv_layer,performance", "tuning_results.txt", "w")
	_write_line("Starting log file.....", mode="w")

	for lr in learn_rate:
		for conv in train_conv_layer:
			print("Loading Resnet")
			# A fresh ImageNet-pretrained base per combination so runs don't
			# contaminate each other.
			resnet_conv = resnet50.ResNet50(
				include_top=False, weights='imagenet',
				input_shape=(img_height, img_width, num_channels))
			print("Resnet Loaded")
			print("Training Model With Hyper Paramters:learning_rate {}, train_conv_layer {} ".format(lr, conv))
			_write_line("Training Model With Hyper Paramters: learning_rate {}, train_conv_layer {} ".format(lr, conv))

			model = train.TrainModel(train_data=train_data, valid_data=valid_data,
				resnet_conv=resnet_conv, learning_rate=lr,
				train_conv_layer=conv, num_epochs=epoch)

			print("Model Trained.............Validation...........")
			predictions = model.predict_generator(
				valid_generator,
				steps=valid_generator.samples / valid_generator.batch_size,
				verbose=1)
			predicted_classes = np.argmax(predictions, axis=1)

			errors = np.where(predicted_classes != ground_truth)[0]
			_write_line("No of errors = {}/{}".format(len(errors), valid_generator.samples))
			_write_line("{},{},{}/{}".format(lr, conv, len(errors), valid_generator.samples), "tuning_results.txt")

			if len(errors) < cur_error:
				cur_best = model
				cur_error = len(errors)
				_write_line("Found New Best Model with params, learning_rate {}, train_conv_layer {}-->errors: {}".format(lr, conv, 1.0 * len(errors) / valid_generator.samples))
				saveModel(model, best_models_path, str(lr) + '_' + str(conv), log_file_path)
				best_lr = lr
				best_conv = conv

	with open("hyperparameter.txt", "w") as fh:
		print(best_lr, best_conv, file=fh)
import train
import ModelControl
import dataset

import torch
import time
import datetime
import os

import configs
# Populate the shared configuration module before anything reads from it.
configs.init()
# General specifications
# Model specifications
# Training specifications

# Dataset Initialisation
# NOTE(review): this rebinds the name `dataset` from the imported module to a
# Dataset instance, shadowing the module for the rest of the script — confirm
# nothing below still needs the module itself.
dataset = dataset.Dataset()

# Model Building
model = ModelControl.createNetwork(depth=configs.depth)

# Training
train_model = train.TrainModel(model, dataset, PATH=configs.PATH_TO_LOG)

# Run training + evaluation; every hyperparameter comes from the configs
# module populated by configs.init() above.
history = train_model.train_eval(max_epochs=configs.max_epochs,
                                 numBatchesperEpoch=configs.numBatchesperEpoch,
                                 patience=configs.patience,
                                 optim=configs.optimizer,
                                 lr=configs.initialLR,
                                 isCEWeightsDynamic=configs.isCEWeightsDynamic)
Exemple #5
0
 def train(self):
     """Run prediction for the currently selected picture on a worker thread.

     The first four TrainModel arguments are placeholder '1' strings; only
     the rich-text widget and button are real. predict2 receives the path
     of the picture chosen in the GUI.
     """
     model = train.TrainModel('1', '1', '1', '1',
                              self.m_richText1, self.m_button1)
     worker_args = (self.picPath, )
     thread.start_new_thread(model.predict2, worker_args)
                default="vgg19",
                type=str)
#ap.add_argument('--hidden_units', type=int, dest="hidden_units", action="store", default=120)

# Pull the parsed CLI options into locals (the argparse parser `ap` is
# built above this chunk).
pa = ap.parse_args()
dirpath = pa.data_dir
path = pa.save_dir
lr = pa.learning_rate
structure = pa.arch
dropout = pa.dropout
#hidden_layer1 = pa.hidden_units
# NOTE(review): `power` is read from the --gpu option but never used below
# — presumably meant to select the device instead of the hard-coded 'cuda'.
power = pa.gpu
epochs = pa.epochs

# Build train/validation/test loaders plus the class-to-index mapping.
trainloader, v_loader, testloader, classidx = train.LoadData(dirpath)

# Construct the network, loss criterion and optimizer for the chosen
# torchvision architecture.
model, criterion, optimizer = train.SetupNuralNet(ImgNetModel=structure,
                                                  lr=lr,
                                                  dropout=dropout)

# Train the model; device is hard-coded to CUDA here (see NOTE above).
model = train.TrainModel(model,
                         optimizer,
                         criterion,
                         epochs,
                         trainLoader=trainloader,
                         validationLoader=v_loader,
                         device='cuda')

# Persist trained weights together with the class-index mapping.
train.SaveCheckpoint(model, classidx, epochs, path=path, ModelName=structure)

print("done Done london")
Exemple #7
0
import sys, os
from PIL import Image
import numpy as np
import pandas as pd

# Predict the flower class of every image passed on the command line.
if len(sys.argv) <= 1:
    quit()

image_size = 50
input_dir = 'images'

# Class labels are the sub-directory names of the training image folder.
categories = [name for name in os.listdir(input_dir) if name != ".DS_Store"]

# Load each argument as a fixed-size RGB array and stack into one batch.
X = np.array([
    np.asarray(Image.open(file_name).convert("RGB").resize((image_size, image_size)))
    for file_name in sys.argv[1:]
])

# Rebuild the network architecture for this input shape, then restore the
# trained weights from disk.
model = train.TrainModel().train(X.shape[1:])
model.load_weights("./model/flower-model.hdf5")

predict = model.predict(X)

for pre in predict:
    y = pre.argmax()
    print("花の名前 : ", categories[y])