# Training/visualization drivers for capsule-network expression recognition on
# CK+ and Oulu-CASIA. Imports this section needs (the local modules -- dataset,
# augmenters, util, visualize, getting_accuracy -- and the train_*/test_*
# helpers are assumed to be defined or imported elsewhere in this repo):
import os

import numpy as np
import scipy.misc
import torch.utils.data
from torchvision import transforms


def train_khorrami_aug_oulu(wdecay, lr, route_iter, folds=[4, 9],
                            model_name='vgg_capsule_disfa',
                            epoch_stuff=[30, 60], res=False,
                            meta_data_dir='train_test_files_preprocess_vl'):
    out_dirs = []
    out_dir_meta = '../experiments/' + model_name + str(route_iter)
    num_epochs = epoch_stuff[1]
    epoch_start = 0
    dec_after = ['exp', 0.96, epoch_stuff[0], 1e-6]
    im_resize = 110
    im_size = 96
    save_after = num_epochs
    type_data = 'three_im_no_neutral_just_strong_False'
    n_classes = 6
    criterion = 'margin'
    criterion_str = criterion
    # criterion = nn.CrossEntropyLoss()
    # criterion_str = 'crossentropy'
    init = False
    strs_append = '_' + '_'.join(
        [str(val) for val in
         ['all_aug', criterion_str, init, 'wdecay', wdecay, num_epochs]
         + dec_after + lr])
    pre_pend = 'oulu_96_' + meta_data_dir + '_' + type_data + '_'

    for split_num in folds:
        if res:
            # resume from the epoch-49 checkpoint of an earlier run
            strs_appendc = '_'.join(
                [str(val) for val in
                 ['all_aug', 'wdecay', wdecay, 50, 'step', 50, 0.1] + lr])
            out_dir_train = os.path.join(
                out_dir_meta,
                'oulu_' + type_data + '_' + str(split_num) + '_' + strs_appendc)
            model_file = os.path.join(out_dir_train, 'model_49.pt')
            epoch_start = 50
        else:
            model_file = None
        margin_params = None

        out_dir_train = os.path.join(out_dir_meta,
                                     pre_pend + str(split_num) + strs_append)
        final_model_file = os.path.join(
            out_dir_train, 'model_' + str(num_epochs - 1) + '.pt')
        if os.path.exists(final_model_file):
            print 'skipping', final_model_file
            continue

        train_file = os.path.join('../data/Oulu_CASIA', meta_data_dir,
                                  type_data, 'train_' + str(split_num) + '.txt')
        test_file = os.path.join('../data/Oulu_CASIA', meta_data_dir,
                                 type_data, 'test_' + str(split_num) + '.txt')
        mean_file = os.path.join('../data/Oulu_CASIA', meta_data_dir, type_data,
                                 'train_' + str(split_num) + '_mean.png')
        std_file = os.path.join('../data/Oulu_CASIA', meta_data_dir, type_data,
                                'train_' + str(split_num) + '_std.png')

        mean_im = scipy.misc.imread(mean_file).astype(np.float32)
        std_im = scipy.misc.imread(std_file).astype(np.float32)
        print std_im.shape
        print np.min(std_im), np.max(std_im)
        print mean_im.shape
        print np.min(mean_im), np.max(mean_im)

        class_weights = util.get_class_weights(util.readLinesFromFile(train_file))

        list_of_to_dos = ['flip', 'rotate', 'scale_translate']
        data_transforms = {}
        data_transforms['train'] = transforms.Compose([
            lambda x: augmenters.augment_image(x, list_of_to_dos, mean_im,
                                               std_im, im_size),
            transforms.ToTensor(),
            lambda x: x * 255.
        ])
        data_transforms['val'] = transforms.Compose([
            transforms.ToTensor(),
            lambda x: x * 255.
        ])

        # train_data = dataset.CK_96_Dataset_Just_Mean(train_file, mean_file, data_transforms['train'])
        # test_data = dataset.CK_96_Dataset_Just_Mean(test_file, mean_file, data_transforms['val'])
        train_data = dataset.CK_96_Dataset(train_file, mean_file, std_file,
                                           data_transforms['train'])
        test_data = dataset.CK_96_Dataset(test_file, mean_file, std_file,
                                          data_transforms['val'])

        network_params = dict(n_classes=n_classes, pool_type='max',
                              r=route_iter, init=init,
                              class_weights=class_weights)
        batch_size = 128
        batch_size_val = 128

        util.makedirs(out_dir_train)
        train_params = dict(out_dir_train=out_dir_train,
                            train_data=train_data,
                            test_data=test_data,
                            batch_size=batch_size,
                            batch_size_val=batch_size_val,
                            num_epochs=num_epochs,
                            save_after=save_after,
                            disp_after=1,
                            plot_after=100,
                            test_after=1,
                            lr=lr,
                            dec_after=dec_after,
                            model_name=model_name,
                            criterion=criterion,
                            gpu_id=0,
                            num_workers=0,
                            model_file=model_file,
                            epoch_start=epoch_start,
                            margin_params=margin_params,
                            network_params=network_params,
                            weight_decay=wdecay)
        print train_params

        param_file = os.path.join(out_dir_train, 'params.txt')
        all_lines = []
        for k in train_params.keys():
            str_print = '%s: %s' % (k, train_params[k])
            print str_print
            all_lines.append(str_print)
        util.writeFile(param_file, all_lines)

        train_model(**train_params)

        test_params = dict(out_dir_train=out_dir_train,
                           model_num=num_epochs - 1,
                           train_data=train_data,
                           test_data=test_data,
                           gpu_id=0,
                           model_name=model_name,
                           batch_size_val=batch_size_val,
                           criterion=criterion,
                           margin_params=margin_params,
                           network_params=network_params)
        test_model(**test_params)

    # getting_accuracy.print_accuracy(out_dir_meta, pre_pend, strs_append, folds, log='log.txt')
    getting_accuracy.view_loss_curves(out_dir_meta, pre_pend, strs_append,
                                      folds, num_epochs - 1)
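
# A minimal usage sketch (hypothetical values) for the Oulu routine above.
# The weight decay and learning-rate list are illustrative placeholders, not
# values confirmed by this file; folds 0-9 assume the ten-fold split files
# named train_<fold>.txt / test_<fold>.txt.
def example_train_khorrami_aug_oulu():
    train_khorrami_aug_oulu(wdecay=0,
                            lr=[0.001],  # lr is passed around as a list
                            route_iter=3,
                            folds=range(10),
                            epoch_stuff=[10, 300],  # [exp-decay step, total epochs]
                            res=False)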

# NOTE: this function is redefined twice more below; in Python the last
# definition in the file shadows the earlier ones.
def train_khorrami_aug(wdecay, lr, route_iter, folds=[4, 9],
                       model_name='vgg_capsule_disfa', epoch_stuff=[30, 60],
                       res=False, class_weights=False, reconstruct=False,
                       loss_weights=None, model_to_test=None, oulu=False,
                       dropout=0):
    out_dirs = []
    out_dir_meta = ('../experiments/showing_overfitting_justhflip_'
                    + model_name + str(route_iter))
    num_epochs = epoch_stuff[1]
    if model_to_test is None:
        model_to_test = num_epochs - 1
    epoch_start = 0
    dec_after = ['step', epoch_stuff[0], 0.1]
    im_resize = 110
    im_size = 96
    save_after = num_epochs

    if not oulu:
        type_data = 'train_test_files'
        n_classes = 8
        train_pre = os.path.join('../data/ck_96', type_data)
        pre_pend = 'ck_96_' + type_data + '_'
    else:
        type_data = 'three_im_no_neutral_just_strong_False'
        n_classes = 6
        train_pre = os.path.join(
            '../data/Oulu_CASIA/train_test_files_preprocess_vl', type_data)
        pre_pend = 'oulu_96_' + type_data + '_'

    criterion = 'margin'
    criterion_str = criterion
    init = False

    strs_append_list = ['reconstruct', reconstruct, class_weights, 'all_aug',
                        criterion_str, init, 'wdecay', wdecay, num_epochs
                        ] + dec_after + lr + [dropout]
    if loss_weights is not None:
        strs_append_list = strs_append_list + ['lossweights'] + loss_weights
    strs_append = '_' + '_'.join([str(val) for val in strs_append_list])
    lr_p = lr[:]

    for split_num in folds:
        model_file = None
        margin_params = None
        out_dir_train = os.path.join(out_dir_meta,
                                     pre_pend + str(split_num) + strs_append)
        final_model_file = os.path.join(
            out_dir_train, 'model_' + str(num_epochs - 1) + '.pt')
        if os.path.exists(final_model_file):
            print 'skipping', final_model_file
            continue
        else:
            print 'not skipping', final_model_file

        train_file = os.path.join(train_pre, 'train_' + str(split_num) + '.txt')
        test_file = os.path.join(train_pre, 'test_' + str(split_num) + '.txt')
        mean_file = os.path.join(train_pre,
                                 'train_' + str(split_num) + '_mean.png')
        std_file = os.path.join(train_pre,
                                'train_' + str(split_num) + '_std.png')
        mean_im = scipy.misc.imread(mean_file).astype(np.float32)
        std_im = scipy.misc.imread(std_file).astype(np.float32)
        class_weights = util.get_class_weights(
            util.readLinesFromFile(train_file))

        # horizontal flip only -- hence 'justhflip' in the experiment dir name
        list_of_to_dos = ['flip']
        data_transforms = {}
        data_transforms['train'] = transforms.Compose([
            lambda x: augmenters.augment_image(x, list_of_to_dos, mean_im,
                                               std_im, im_size),
            transforms.ToTensor(),
            lambda x: x * 255.
        ])
        data_transforms['val'] = transforms.Compose(
            [transforms.ToTensor(), lambda x: x * 255.])

        train_data = dataset.CK_96_Dataset(train_file, mean_file, std_file,
                                           data_transforms['train'])
        test_data = dataset.CK_96_Dataset(test_file, mean_file, std_file,
                                          data_transforms['val'])

        network_params = dict(n_classes=n_classes,
                              pool_type='max',
                              r=route_iter,
                              init=init,
                              class_weights=class_weights,
                              reconstruct=reconstruct,
                              loss_weights=loss_weights,
                              dropout=dropout)
        batch_size = 128
        batch_size_val = None

        util.makedirs(out_dir_train)
        train_params = dict(out_dir_train=out_dir_train,
                            train_data=train_data,
                            test_data=test_data,
                            batch_size=batch_size,
                            batch_size_val=batch_size_val,
                            num_epochs=num_epochs,
                            save_after=save_after,
                            disp_after=1,
                            plot_after=100,
                            test_after=1,
                            lr=lr,
                            dec_after=dec_after,
                            model_name=model_name,
                            criterion=criterion,
                            gpu_id=0,
                            num_workers=2,
                            model_file=model_file,
                            epoch_start=epoch_start,
                            margin_params=margin_params,
                            network_params=network_params,
                            weight_decay=wdecay)
        test_params = dict(out_dir_train=out_dir_train,
                           model_num=model_to_test,  # num_epochs-1
                           train_data=train_data,
                           test_data=test_data,
                           gpu_id=0,
                           model_name=model_name,
                           batch_size_val=batch_size_val,
                           criterion=criterion,
                           margin_params=margin_params,
                           network_params=network_params)
        print train_params

        param_file = os.path.join(out_dir_train, 'params.txt')
        all_lines = []
        for k in train_params.keys():
            str_print = '%s: %s' % (k, train_params[k])
            print str_print
            all_lines.append(str_print)
        util.writeFile(param_file, all_lines)

        if reconstruct:
            train_model_recon(**train_params)
            # test_model_recon(**test_params)
        else:
            train_model(**train_params)

# NOTE: redefines train_khorrami_aug above (dropout / non-peak experiments);
# the last definition in the file is the one callable at runtime.
def train_khorrami_aug(wdecay, lr, route_iter, folds=[4, 9],
                       model_name='vgg_capsule_disfa', epoch_stuff=[30, 60],
                       res=False, class_weights=False, reconstruct=False,
                       oulu=False, meta_data_dir=None, loss_weights=None,
                       exp=False, non_peak=False, model_to_test=None,
                       dropout=None):
    out_dirs = []
    out_dir_meta = '../experiments_dropout/' + model_name + str(route_iter)
    num_epochs = epoch_stuff[1]
    if model_to_test is None:
        model_to_test = num_epochs - 1
    epoch_start = 0
    if exp:
        dec_after = ['exp', 0.96, epoch_stuff[0], 1e-6]
    else:
        dec_after = ['step', epoch_stuff[0], 0.1]
    im_resize = 110
    im_size = 96
    save_after = 100

    if non_peak:
        type_data = 'train_test_files_non_peak_one_third'
        n_classes = 8
        train_pre = os.path.join('../data/ck_96', type_data)
        test_pre = os.path.join('../data/ck_96', 'train_test_files')
    else:
        type_data = 'train_test_files'
        n_classes = 8
        train_pre = os.path.join('../data/ck_96', type_data)
        test_pre = os.path.join('../data/ck_96', type_data)
    if oulu:
        type_data = 'three_im_no_neutral_just_strong_False'
        n_classes = 6

    criterion = 'margin'
    criterion_str = criterion
    # criterion = nn.CrossEntropyLoss()
    # criterion_str = 'crossentropy'
    init = False

    strs_append_list = ['reconstruct', reconstruct, class_weights, 'flip',
                        criterion_str, init, 'wdecay', wdecay, num_epochs
                        ] + dec_after + lr + ['dropout', dropout]
    if loss_weights is not None:
        strs_append_list = strs_append_list + ['lossweights'] + loss_weights
    strs_append = '_' + '_'.join([str(val) for val in strs_append_list])
    if oulu:
        pre_pend = 'oulu_96_' + meta_data_dir + '_'
    else:
        pre_pend = 'ck_96_' + type_data + '_'
    lr_p = lr[:]

    for split_num in folds:
        if res:
            # resume from the epoch-599 checkpoint and drop the lr by 10x
            strs_appendc = '_' + '_'.join(
                [str(val) for val in
                 ['reconstruct', reconstruct, True, 'all_aug', criterion_str,
                  init, 'wdecay', wdecay, 600, 'step', 600, 0.1] + lr_p])
            out_dir_train = os.path.join(
                out_dir_meta, pre_pend + str(split_num) + strs_appendc)
            model_file = os.path.join(out_dir_train, 'model_599.pt')
            epoch_start = 600
            lr = [0.1 * lr_curr for lr_curr in lr_p]
        else:
            model_file = None
        margin_params = None

        out_dir_train = os.path.join(out_dir_meta,
                                     pre_pend + str(split_num) + strs_append)
        final_model_file = os.path.join(
            out_dir_train, 'model_' + str(num_epochs - 1) + '.pt')
        if os.path.exists(final_model_file):
            print 'skipping', final_model_file
            continue
        else:
            print 'not skipping', final_model_file

        if not oulu:
            train_file = os.path.join(train_pre, 'train_' + str(split_num) + '.txt')
            test_file_easy = os.path.join(train_pre, 'test_' + str(split_num) + '.txt')
            test_file = os.path.join(test_pre, 'test_' + str(split_num) + '.txt')
            mean_file = os.path.join(train_pre, 'train_' + str(split_num) + '_mean.png')
            std_file = os.path.join(train_pre, 'train_' + str(split_num) + '_std.png')
        else:
            train_file = os.path.join('../data/Oulu_CASIA', meta_data_dir,
                                      type_data, 'train_' + str(split_num) + '.txt')
            test_file = os.path.join('../data/Oulu_CASIA', meta_data_dir,
                                     type_data, 'test_' + str(split_num) + '.txt')
            # bug fix: test_file_easy was previously undefined on this branch
            test_file_easy = test_file
            mean_file = os.path.join('../data/Oulu_CASIA', meta_data_dir, type_data,
                                     'train_' + str(split_num) + '_mean.png')
            std_file = os.path.join('../data/Oulu_CASIA', meta_data_dir, type_data,
                                    'train_' + str(split_num) + '_std.png')

        mean_im = scipy.misc.imread(mean_file).astype(np.float32)
        std_im = scipy.misc.imread(std_file).astype(np.float32)
        class_weights = util.get_class_weights(util.readLinesFromFile(train_file))

        list_of_to_dos = ['flip']  # ,'rotate','scale_translate','pixel_augment'
        data_transforms = {}
        data_transforms['train'] = transforms.Compose([
            lambda x: augmenters.augment_image(x, list_of_to_dos, mean_im,
                                               std_im, im_size),
            transforms.ToTensor(),
            lambda x: x * 255.
        ])
        data_transforms['val'] = transforms.Compose([
            transforms.ToTensor(),
            lambda x: x * 255.
        ])

        print train_file
        print test_file
        print std_file
        print mean_file

        train_data = dataset.CK_96_Dataset(train_file, mean_file, std_file,
                                           data_transforms['train'])
        # the 'easy' eval set: the training distribution's own test split
        train_data_no_t = dataset.CK_96_Dataset(test_file_easy, mean_file,
                                                std_file, data_transforms['val'])
        test_data = dataset.CK_96_Dataset(test_file, mean_file, std_file,
                                          data_transforms['val'])

        if dropout is not None:
            network_params = dict(n_classes=n_classes, pool_type='max',
                                  r=route_iter, init=init,
                                  class_weights=class_weights,
                                  reconstruct=reconstruct,
                                  loss_weights=loss_weights, dropout=dropout)
        else:
            network_params = dict(n_classes=n_classes, pool_type='max',
                                  r=route_iter, init=init,
                                  class_weights=class_weights,
                                  reconstruct=reconstruct,
                                  loss_weights=loss_weights)
        batch_size = 128
        batch_size_val = 128

        util.makedirs(out_dir_train)
        train_params = dict(out_dir_train=out_dir_train,
                            train_data=train_data,
                            test_data=test_data,
                            batch_size=batch_size,
                            batch_size_val=batch_size_val,
                            num_epochs=num_epochs,
                            save_after=save_after,
                            disp_after=1,
                            plot_after=100,
                            test_after=1,
                            lr=lr,
                            dec_after=dec_after,
                            model_name=model_name,
                            criterion=criterion,
                            gpu_id=0,
                            num_workers=2,
                            model_file=model_file,
                            epoch_start=epoch_start,
                            margin_params=margin_params,
                            network_params=network_params,
                            weight_decay=wdecay)
        test_params = dict(out_dir_train=out_dir_train,
                           model_num=model_to_test,  # num_epochs-1
                           train_data=train_data,
                           test_data=test_data,
                           gpu_id=0,
                           model_name=model_name,
                           batch_size_val=batch_size_val,
                           criterion=criterion,
                           margin_params=margin_params,
                           network_params=network_params)
        test_params_train = dict(**test_params)
        test_params_train['test_data'] = train_data_no_t
        test_params_train['post_pend'] = '_easy'
        print train_params

        param_file = os.path.join(out_dir_train, 'params.txt')
        all_lines = []
        for k in train_params.keys():
            str_print = '%s: %s' % (k, train_params[k])
            print str_print
            all_lines.append(str_print)
        util.writeFile(param_file, all_lines)

        # if reconstruct:
        train_model_recon(**train_params)
        # test_model_recon(**test_params)
        # else:
        #     train_model(**train_params)
        #     test_model(**test_params)

    getting_accuracy.print_accuracy(out_dir_meta, pre_pend, strs_append,
                                    folds, log='log.txt')

# NOTE: third definition of train_khorrami_aug in this file (AU-supervised
# variant); being last, it is the one visible at runtime.
def train_khorrami_aug(wdecay, lr, route_iter, folds=[4, 9],
                       model_name='vgg_capsule_disfa', epoch_stuff=[30, 60],
                       res=False, class_weights=False, reconstruct=False):
    out_dirs = []
    out_dir_meta = '../experiments/' + model_name + str(route_iter)
    num_epochs = epoch_stuff[1]
    epoch_start = 0
    # dec_after = ['exp',0.96,epoch_stuff[0],1e-6]
    dec_after = ['step', epoch_stuff[0], 0.1]
    im_resize = 110
    im_size = 96
    save_after = 100
    type_data = 'train_test_files'
    n_classes = 8
    criterion = 'margin'
    criterion_str = criterion
    # criterion = nn.CrossEntropyLoss()
    # criterion_str = 'crossentropy'
    init = False
    # weights for the [classification, AU supervision, reconstruction] losses
    loss_weights = [1., 0.5, 0.5]
    strs_append = '_' + '_'.join(
        [str(val) for val in
         ['au_sup', loss_weights, 'reconstruct', reconstruct, class_weights,
          'all_aug', criterion_str, init, 'wdecay', wdecay, num_epochs]
         + dec_after + lr])
    pre_pend = 'ck_96_'
    lr_p = lr[:]

    for split_num in folds:
        if res:
            # resume from the epoch-300 checkpoint and drop the lr by 10x
            strs_appendc = '_' + '_'.join(
                [str(val) for val in
                 ['reconstruct', reconstruct, True, 'all_aug', criterion_str,
                  init, 'wdecay', wdecay, 600, 'step', 600, 0.1] + lr_p])
            out_dir_train = os.path.join(
                out_dir_meta, pre_pend + str(split_num) + strs_appendc)
            model_file = os.path.join(out_dir_train, 'model_300.pt')
            epoch_start = 300
            lr = [0.1 * lr_curr for lr_curr in lr_p]
        else:
            model_file = None
        margin_params = None

        out_dir_train = os.path.join(out_dir_meta,
                                     pre_pend + str(split_num) + strs_append)
        final_model_file = os.path.join(
            out_dir_train, 'model_' + str(num_epochs - 1) + '.pt')
        if os.path.exists(final_model_file):
            print 'skipping', final_model_file
            continue

        train_file = ('../data/ck_96/train_test_files/train_emofacscombo_'
                      + str(split_num) + '.txt')
        test_file = ('../data/ck_96/train_test_files/test_emofacscombo_'
                     + str(split_num) + '.txt')
        mean_file = ('../data/ck_96/train_test_files/train_'
                     + str(split_num) + '_mean.png')
        std_file = ('../data/ck_96/train_test_files/train_'
                    + str(split_num) + '_std.png')
        mean_im = scipy.misc.imread(mean_file).astype(np.float32)
        std_im = scipy.misc.imread(std_file).astype(np.float32)

        if class_weights:
            print class_weights
            actual_class_weights, au_class_weights = util.get_class_weights(
                util.readLinesFromFile(train_file), au=True)
            print actual_class_weights
            print au_class_weights
        else:
            actual_class_weights = None
            au_class_weights = None

        list_of_to_dos = ['flip', 'rotate', 'scale_translate', 'pixel_augment']
        data_transforms = {}
        data_transforms['train'] = transforms.Compose([
            lambda x: augmenters.augment_image(x, list_of_to_dos, mean_im,
                                               std_im, im_size),
            transforms.ToTensor(),
            lambda x: x * 255.
        ])
        data_transforms['val'] = transforms.Compose([
            transforms.ToTensor(),
            lambda x: x * 255.
        ])

        train_data = dataset.CK_96_Dataset_WithAU(train_file, mean_file,
                                                  std_file,
                                                  data_transforms['train'])
        test_data = dataset.CK_96_Dataset_WithAU(test_file, mean_file,
                                                 std_file,
                                                 data_transforms['val'])

        network_params = dict(n_classes=n_classes,
                              pool_type='max',
                              r=route_iter,
                              init=init,
                              class_weights=actual_class_weights,
                              reconstruct=reconstruct,
                              au_sup=True,
                              class_weights_au=au_class_weights,
                              loss_weights=loss_weights)
        batch_size = 128
        batch_size_val = 128

        util.makedirs(out_dir_train)
        train_params = dict(out_dir_train=out_dir_train,
                            train_data=train_data,
                            test_data=test_data,
                            batch_size=batch_size,
                            batch_size_val=batch_size_val,
                            num_epochs=num_epochs,
                            save_after=save_after,
                            disp_after=1,
                            plot_after=100,
                            test_after=1,
                            lr=lr,
                            dec_after=dec_after,
                            model_name=model_name,
                            criterion=criterion,
                            gpu_id=0,
                            num_workers=0,
                            model_file=model_file,
                            epoch_start=epoch_start,
                            margin_params=margin_params,
                            network_params=network_params,
                            weight_decay=wdecay)
        print train_params

        param_file = os.path.join(out_dir_train, 'params.txt')
        all_lines = []
        for k in train_params.keys():
            str_print = '%s: %s' % (k, train_params[k])
            print str_print
            all_lines.append(str_print)
        util.writeFile(param_file, all_lines)

        # if reconstruct:
        train_model_recon_au(**train_params)
        # else:
        #     train_model(**train_params)

        # test_params = dict(out_dir_train=out_dir_train,
        #                    model_num=num_epochs - 1,
        #                    train_data=train_data,
        #                    test_data=test_data,
        #                    gpu_id=0,
        #                    model_name=model_name,
        #                    batch_size_val=batch_size_val,
        #                    criterion=criterion,
        #                    margin_params=margin_params,
        #                    network_params=network_params)
        # test_model(**test_params)

    getting_accuracy.print_accuracy(out_dir_meta, pre_pend, strs_append,
                                    folds, log='log.txt')
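
# Hypothetical invocation of the AU-supervised variant directly above (the
# definition that wins the name train_khorrami_aug at runtime). It hard-codes
# loss_weights=[1., 0.5, 0.5] and the emofacscombo split files, so a caller
# mainly picks optimization settings; all values below are placeholders.
def example_train_khorrami_aug_au():
    train_khorrami_aug(wdecay=0,
                       lr=[0.001],
                       route_iter=3,
                       folds=range(10),
                       epoch_stuff=[300, 600],  # [lr-step epoch, total epochs]
                       class_weights=True,
                       reconstruct=True)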

def trying_out_recon(wdecay, lr):
    for split_num in [4, 9]:
        out_dir_meta = '../experiments/oulu_with_recon_0.5_r3/'
        route_iter = 3
        num_epochs = 100
        epoch_start = 0
        # dec_after = ['exp',0.96,3,1e-6]
        dec_after = ['step', 100, 0.1]
        pool_type = 'max'
        im_size = 96
        model_name = 'khorrami_capsule_reconstruct'
        save_after = 50
        type_data = 'three_im_no_neutral_just_strong'
        n_classes = 6
        model_file = None
        criterion = 'margin'
        margin_params = None
        spread_loss_params = dict(end_epoch=int(num_epochs * 0.5),
                                  decay_steps=5,
                                  init_margin=0.5,
                                  max_margin=0.5)
        # spread_loss_params = dict(end_epoch=int(num_epochs * 0.9), decay_steps=5,
        #                           init_margin=0.9, max_margin=0.9)

        strs_append = '_'.join(
            [str(val) for val in
             ['justflip', 'wdecay', wdecay, pool_type, num_epochs]
             + dec_after + lr])
        out_dir_train = os.path.join(
            out_dir_meta,
            'oulu_' + type_data + '_' + str(split_num) + '_' + strs_append)
        print out_dir_train

        train_file = os.path.join('../data/Oulu_CASIA',
                                  'train_test_files_preprocess_maheen_vl_gray',
                                  type_data, 'train_' + str(split_num) + '.txt')
        test_file = os.path.join('../data/Oulu_CASIA',
                                 'train_test_files_preprocess_maheen_vl_gray',
                                 type_data, 'test_' + str(split_num) + '.txt')
        mean_std_file = os.path.join(
            '../data/Oulu_CASIA', 'train_test_files_preprocess_maheen_vl_gray',
            type_data, 'train_' + str(split_num) + '_mean_std_val_0_1.npy')

        class_weights = util.get_class_weights(util.readLinesFromFile(train_file))
        mean_std = np.load(mean_std_file)
        print mean_std

        data_transforms = {}
        data_transforms['train'] = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((im_size, im_size)),
            # transforms.Resize((102, 102)),
            # transforms.RandomCrop(im_size),
            transforms.RandomHorizontalFlip(),
            # transforms.RandomRotation(15),
            # transforms.ColorJitter(),
            transforms.ToTensor(),
            transforms.Normalize([float(mean_std[0])], [float(mean_std[1])])
        ])
        data_transforms['val'] = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((im_size, im_size)),
            transforms.ToTensor(),
            transforms.Normalize([float(mean_std[0])], [float(mean_std[1])])
        ])

        train_data = dataset.Oulu_Static_Dataset(train_file,
                                                 data_transforms['train'])
        test_data = dataset.Oulu_Static_Dataset(test_file,
                                                data_transforms['val'])

        network_params = dict(n_classes=n_classes,
                              spread_loss_params=spread_loss_params,
                              pool_type=pool_type,
                              r=route_iter,
                              init=False,
                              reconstruct=True,
                              class_weights=class_weights)
        batch_size = 64
        batch_size_val = 64

        util.makedirs(out_dir_train)
        train_params = dict(out_dir_train=out_dir_train,
                            train_data=train_data,
                            test_data=test_data,
                            batch_size=batch_size,
                            batch_size_val=batch_size_val,
                            num_epochs=num_epochs,
                            save_after=save_after,
                            disp_after=1,
                            plot_after=10,
                            test_after=1,
                            lr=lr,
                            dec_after=dec_after,
                            model_name=model_name,
                            criterion=criterion,
                            gpu_id=0,
                            num_workers=0,
                            model_file=model_file,
                            epoch_start=epoch_start,
                            margin_params=margin_params,
                            network_params=network_params,
                            weight_decay=wdecay)
        print train_params

        param_file = os.path.join(out_dir_train, 'params.txt')
        all_lines = []
        for k in train_params.keys():
            str_print = '%s: %s' % (k, train_params[k])
            print str_print
            all_lines.append(str_print)
        util.writeFile(param_file, all_lines)

        train_model(**train_params)

        test_params = dict(out_dir_train=out_dir_train,
                           model_num=num_epochs - 1,
                           train_data=train_data,
                           test_data=test_data,
                           gpu_id=0,
                           model_name=model_name,
                           batch_size_val=batch_size_val,
                           criterion=criterion,
                           margin_params=margin_params,
                           network_params=network_params)
        test_model(**test_params)
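
# Hypothetical sketch: trying_out_recon fixes almost everything internally
# (folds 4 and 9, 100 epochs, batch size 64, spread-loss margin 0.5), so only
# the weight decay and learning-rate list are exposed. Placeholder values.
def example_trying_out_recon():
    trying_out_recon(wdecay=0, lr=[0.001])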

def train_gray(wdecay, lr, route_iter, folds=[4, 9],
               model_name='vgg_capsule_disfa', epoch_stuff=[30, 60],
               res=False, reconstruct=False, oulu=False,
               meta_data_dir='train_test_files_preprocess_vl',
               loss_weights=None, exp=False, dropout=0, gpu_id=0,
               aug_more=['flip'], model_to_test=None):
    # aug_more is a list of augmentation names; a list default replaces the
    # original string default 'flip', which broke the list concatenation below
    out_dir_meta = '../experiments_dropout/' + model_name + '_' + str(route_iter)
    num_epochs = epoch_stuff[1]
    if model_to_test is None:
        model_to_test = num_epochs - 1
    epoch_start = 0
    if exp:
        dec_after = ['exp', 0.96, epoch_stuff[0], 1e-6]
    else:
        dec_after = ['step', epoch_stuff[0], 0.1]
    im_resize = 110
    im_size = 96
    save_after = 100
    type_data = 'train_test_files'
    n_classes = 8
    train_pre = os.path.join('../data/ck_96', type_data)
    test_pre = os.path.join('../data/ck_96', type_data)
    if oulu:
        type_data = 'three_im_no_neutral_just_strong_False'
        n_classes = 6
    criterion = 'margin'
    criterion_str = criterion
    init = False

    strs_append_list = (['reconstruct', reconstruct] + aug_more + [num_epochs]
                        + dec_after + lr + [dropout])
    if loss_weights is not None:
        strs_append_list = strs_append_list + ['lossweights'] + loss_weights
    strs_append = '_' + '_'.join([str(val) for val in strs_append_list])
    if oulu:
        pre_pend = 'oulu_96_' + meta_data_dir + '_'
    else:
        pre_pend = 'ck_96_' + type_data + '_'
    lr_p = lr[:]

    for split_num in folds:
        if res:
            print 'what to res?'
            raw_input()
        else:
            model_file = None
        margin_params = None

        out_dir_train = os.path.join(out_dir_meta,
                                     pre_pend + str(split_num) + strs_append)
        print out_dir_train
        final_model_file = os.path.join(
            out_dir_train, 'model_' + str(num_epochs - 1) + '.pt')
        if os.path.exists(final_model_file):
            print 'skipping', final_model_file
            continue
        else:
            print 'not skipping', final_model_file

        if not oulu:
            train_file = os.path.join(train_pre, 'train_' + str(split_num) + '.txt')
            test_file_easy = os.path.join(train_pre, 'test_' + str(split_num) + '.txt')
            test_file = os.path.join(test_pre, 'test_' + str(split_num) + '.txt')
            mean_file = os.path.join(train_pre, 'train_' + str(split_num) + '_mean.png')
            std_file = os.path.join(train_pre, 'train_' + str(split_num) + '_std.png')
        else:
            train_file = os.path.join('../data/Oulu_CASIA', meta_data_dir,
                                      type_data, 'train_' + str(split_num) + '.txt')
            test_file = os.path.join('../data/Oulu_CASIA', meta_data_dir,
                                     type_data, 'test_' + str(split_num) + '.txt')
            mean_file = os.path.join('../data/Oulu_CASIA', meta_data_dir, type_data,
                                     'train_' + str(split_num) + '_mean.png')
            std_file = os.path.join('../data/Oulu_CASIA', meta_data_dir, type_data,
                                    'train_' + str(split_num) + '_std.png')

        mean_im = scipy.misc.imread(mean_file).astype(np.float32)
        std_im = scipy.misc.imread(std_file).astype(np.float32)
        class_weights = util.get_class_weights(util.readLinesFromFile(train_file))

        # e.g. ['flip', 'rotate', 'scale_translate', 'pixel_augment']
        list_of_to_dos = aug_more
        print list_of_to_dos

        data_transforms = {}
        if 'hs' in list_of_to_dos:
            # hide-and-seek augmentation, optionally combined with flipping
            print '**********HS!!!!!!!'
            list_transforms = [lambda x: augmenters.hide_and_seek(x)]
            if 'flip' in list_of_to_dos:
                list_transforms.append(lambda x: augmenters.horizontal_flip(x))
            list_transforms = list_transforms + [transforms.ToTensor(),
                                                 lambda x: x * 255.]
            print list_transforms
            data_transforms['train'] = transforms.Compose(list_transforms)
        elif 'none' in list_of_to_dos:
            print 'DOING NOTHING!!!!!!'
            data_transforms['train'] = transforms.Compose([
                transforms.ToTensor(),
                lambda x: x * 255.
            ])
        else:
            data_transforms['train'] = transforms.Compose([
                lambda x: augmenters.augment_image(x, list_of_to_dos, mean_im,
                                                   std_im, im_size),
                transforms.ToTensor(),
                lambda x: x * 255.
            ])
        data_transforms['val'] = transforms.Compose([
            transforms.ToTensor(),
            lambda x: x * 255.
        ])
        print data_transforms['train']

        print train_file
        print test_file
        print std_file
        print mean_file

        train_data = dataset.CK_96_Dataset(train_file, mean_file, std_file,
                                           data_transforms['train'])
        test_data = dataset.CK_96_Dataset(test_file, mean_file, std_file,
                                          data_transforms['val'])

        network_params = dict(n_classes=n_classes, pool_type='max',
                              r=route_iter, init=init,
                              class_weights=class_weights,
                              reconstruct=reconstruct,
                              loss_weights=loss_weights, dropout=dropout)
        batch_size = 128
        batch_size_val = 128

        util.makedirs(out_dir_train)
        train_params = dict(out_dir_train=out_dir_train,
                            train_data=train_data,
                            test_data=test_data,
                            batch_size=batch_size,
                            batch_size_val=batch_size_val,
                            num_epochs=num_epochs,
                            save_after=save_after,
                            disp_after=1,
                            plot_after=100,
                            test_after=1,
                            lr=lr,
                            dec_after=dec_after,
                            model_name=model_name,
                            criterion=criterion,
                            gpu_id=gpu_id,
                            num_workers=0,
                            model_file=model_file,
                            epoch_start=epoch_start,
                            margin_params=margin_params,
                            network_params=network_params,
                            weight_decay=wdecay)
        test_params = dict(out_dir_train=out_dir_train,
                           model_num=model_to_test,  # num_epochs-1
                           train_data=train_data,
                           test_data=test_data,
                           gpu_id=gpu_id,
                           model_name=model_name,
                           batch_size_val=batch_size_val,
                           criterion=criterion,
                           margin_params=margin_params,
                           network_params=network_params)
        print train_params

        param_file = os.path.join(out_dir_train, 'params.txt')
        all_lines = []
        for k in train_params.keys():
            str_print = '%s: %s' % (k, train_params[k])
            print str_print
            all_lines.append(str_print)
        util.writeFile(param_file, all_lines)

        # if reconstruct:
        train_model_recon(**train_params)
        test_model_recon(**test_params)
        # else:
        #     train_model(**train_params)
        #     test_model(**test_params)

    getting_accuracy.print_accuracy(out_dir_meta, pre_pend, strs_append,
                                    folds, log='log.txt')
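
# Hypothetical sketch of the hide-and-seek branch of train_gray: including
# 'hs' in aug_more (optionally alongside 'flip') switches the training
# transform from augmenters.augment_image to augmenters.hide_and_seek.
# All values below are placeholders.
def example_train_gray_hs():
    train_gray(wdecay=0,
               lr=[0.001],
               route_iter=3,
               folds=range(10),
               epoch_stuff=[300, 600],
               reconstruct=True,
               aug_more=['flip', 'hs'],
               gpu_id=0)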

def checking_aug(wdecay, lr, route_iter, folds=[4, 9],
                 model_name='vgg_capsule_disfa', epoch_stuff=[30, 60],
                 res=False, reconstruct=False, oulu=False,
                 meta_data_dir='train_test_files_preprocess_vl',
                 loss_weights=None, exp=False, dropout=0, gpu_id=0,
                 aug_more=['flip'], model_to_test=None):
    # sanity-checks the augmentation pipeline by writing augmented training
    # crops to disk; no training happens here
    out_dir_meta = '../experiments_dropout/' + model_name + '_' + str(route_iter)
    num_epochs = epoch_stuff[1]
    if model_to_test is None:
        model_to_test = num_epochs - 1
    epoch_start = 0
    if exp:
        dec_after = ['exp', 0.96, epoch_stuff[0], 1e-6]
    else:
        dec_after = ['step', epoch_stuff[0], 0.1]
    im_resize = 110
    im_size = 96
    save_after = 100
    type_data = 'train_test_files'
    n_classes = 8
    train_pre = os.path.join('../data/ck_96', type_data)
    test_pre = os.path.join('../data/ck_96', type_data)
    if oulu:
        type_data = 'three_im_no_neutral_just_strong_False'
        n_classes = 6
    criterion = 'margin'
    criterion_str = criterion
    init = False

    strs_append_list = (['reconstruct', reconstruct] + aug_more + [num_epochs]
                        + dec_after + lr + [dropout])
    if loss_weights is not None:
        strs_append_list = strs_append_list + ['lossweights'] + loss_weights
    strs_append = '_' + '_'.join([str(val) for val in strs_append_list])
    if oulu:
        pre_pend = 'oulu_96_' + meta_data_dir + '_'
    else:
        pre_pend = 'ck_96_' + type_data + '_'
    lr_p = lr[:]

    for split_num in folds:
        if res:
            print 'what to res?'
            raw_input()
        else:
            model_file = None
        margin_params = None

        out_dir_train = os.path.join(out_dir_meta,
                                     pre_pend + str(split_num) + strs_append)
        print out_dir_train
        final_model_file = os.path.join(
            out_dir_train, 'model_' + str(num_epochs - 1) + '.pt')
        if os.path.exists(final_model_file):
            print 'skipping', final_model_file
            # continue
        else:
            print 'not skipping', final_model_file

        if not oulu:
            train_file = os.path.join(train_pre, 'train_' + str(split_num) + '.txt')
            # train_file = os.path.join(train_pre, 'test_' + str(split_num) + '.txt')
            test_file_easy = os.path.join(train_pre, 'test_' + str(split_num) + '.txt')
            test_file = os.path.join(test_pre, 'test_' + str(split_num) + '.txt')
            mean_file = os.path.join(train_pre, 'train_' + str(split_num) + '_mean.png')
            std_file = os.path.join(train_pre, 'train_' + str(split_num) + '_std.png')
        else:
            train_file = os.path.join('../data/Oulu_CASIA', meta_data_dir,
                                      type_data, 'train_' + str(split_num) + '.txt')
            test_file = os.path.join('../data/Oulu_CASIA', meta_data_dir,
                                     type_data, 'test_' + str(split_num) + '.txt')
            mean_file = os.path.join('../data/Oulu_CASIA', meta_data_dir, type_data,
                                     'train_' + str(split_num) + '_mean.png')
            std_file = os.path.join('../data/Oulu_CASIA', meta_data_dir, type_data,
                                    'train_' + str(split_num) + '_std.png')

        mean_im = scipy.misc.imread(mean_file).astype(np.float32)
        std_im = scipy.misc.imread(std_file).astype(np.float32)
        class_weights = util.get_class_weights(util.readLinesFromFile(train_file))

        list_of_to_dos = aug_more
        print list_of_to_dos

        data_transforms = {}
        data_transforms['train'] = transforms.Compose([
            # lambda x: augmenters.augment_image(x, list_of_to_dos, mean_im, std_im, im_size),
            lambda x: augmenters.hide_and_seek(x, div_sizes=[9, 7, 5, 3],
                                               hide_prob=0.5, fill_val=0),
            transforms.ToTensor(),
            lambda x: x * 255.
        ])
        data_transforms['val'] = transforms.Compose([
            transforms.ToTensor(),
            lambda x: x * 255.
        ])

        print train_file
        print test_file
        print std_file
        print mean_file

        train_data = dataset.CK_96_Dataset(train_file, mean_file, std_file,
                                           data_transforms['train'])
        batch_size = 1  # len(train_data)
        print batch_size
        train_dataloader = torch.utils.data.DataLoader(train_data,
                                                       batch_size=batch_size,
                                                       shuffle=False)

        out_dir_im = '../experiments_dropout/checking_aug/im_flip_check'
        util.makedirs(out_dir_im)
        for num_iter_train, batch in enumerate(train_dataloader):
            if num_iter_train % 100 == 0:
                print num_iter_train
            ims = batch['image'].cpu().numpy()
            labels = batch['label']
            # to undo the normalization before viewing:
            # ims = ims * train_data.std[np.newaxis, np.newaxis, :, :]
            # ims = ims + train_data.mean[np.newaxis, np.newaxis, :, :]
            for num_curr, im_curr in enumerate(ims):
                if num_curr % 100 == 0:
                    print num_curr
                im_curr = im_curr.squeeze()
                out_file_curr = os.path.join(
                    out_dir_im,
                    '_'.join([str(val) for val in [num_iter_train, num_curr]]) + '.png')
                scipy.misc.imsave(out_file_curr, im_curr)

        visualize.writeHTMLForFolder(out_dir_im, '.png')
        print 'done'
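
# Hypothetical sketch: checking_aug never trains; it only dumps augmented
# training crops to ../experiments_dropout/checking_aug/im_flip_check and
# writes an HTML index for eyeballing them. Placeholder values.
def example_checking_aug():
    checking_aug(wdecay=0,
                 lr=[0.001],
                 route_iter=3,
                 folds=[4],
                 epoch_stuff=[300, 600])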