def SaveNpy(self, entries, sel):
    args = self.arggen(entries)
    data_path, fext, num, cr = args[0], args[1], int(args[2]), float(args[3])
    if sel == 'trlab':
        if selfold in sorted(os.listdir(os.path.join(data_path, train_p))):
            print('Augmented data found. Saving augmented data instead of original ones')
            create_train_data(data_path, os.path.join(train_p, selfold),
                              os.path.join(label_p, selfold), fext)
        else:
            create_train_data(data_path, train_p, label_p, fext)
    if sel == 'test':
        create_test_data(data_path, test_p, fext)
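A hypothetical call site, only to show how the arggen() fields line up with (data_path, fext, num, cr). The entries values, and the instance name gui, are illustrative assumptions; train_p, label_p, test_p and selfold are module-level names defined elsewhere in the original source.

# Illustrative assumption: arggen() unpacks entries into (data_path, extension, replicas, crop_ratio).
entries = ('/data/experiment1', 'tif', '4', '0.8')
gui.SaveNpy(entries, sel='trlab')   # save train + label .npy files
gui.SaveNpy(entries, sel='test')    # save test .npy file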
def main():
    args = parse_args()
    op = load_config(args.config)
    op.test = args.test
    print('Testing data....', op.test)

    # Set random seed for reproducibility
    manualSeed = op.seed
    print("Random Seed: ", manualSeed)
    random.seed(manualSeed)
    torch.manual_seed(manualSeed)

    # Load data/models on GPU?
    device = gpu_check(op)

    print('Creating data...')
    real_data_loader = create_train_data(op, device)
    if op.test:
        print('Done testing data!')
        return

    # Init wandb
    wandb.init(project='dfdf', config=op)
    print('==== Config ====', wandb.config)

    # Now we can enter the training loop!
    print('Creating models...')
    awesome = create_model(op, device)
    print('Starting training loop..')
    awesome.train(real_data_loader)
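gpu_check is not shown above; the following is a minimal sketch of what such a helper typically does in PyTorch. The op.cuda flag name and the CUDA re-seeding are assumptions, not taken from the original code.

import torch

def gpu_check(op):
    # Assumed behaviour: use CUDA when requested and available, else fall back to CPU.
    use_cuda = getattr(op, 'cuda', True) and torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')
    if use_cuda:
        # Seed CUDA too so GPU runs stay reproducible alongside torch.manual_seed.
        torch.cuda.manual_seed_all(op.seed)
    return device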
def train(model_name):
    # Get the labels from the .env file
    labels = env('LABELS')
    size = int(env('IMG_SIZE'))

    # Create the train data (format the training images)
    train_data = create_train_data()

    tf.reset_default_graph()
    convnet = network(size, labels)
    model = tflearn.DNN(convnet, tensorboard_dir='log')

    # If the model already exists, load it so we are not training from scratch
    if os.path.exists('{}.meta'.format(model_name)):
        model.load(model_name)
        print('model loaded!')

    X = np.array([i[0] for i in train_data]).reshape(-1, size, size, 1)
    Y = [i[1] for i in train_data]
    model.fit(X, Y, n_epoch=50)

    # Save the model in the models folder
    model.save('../models/' + model_name)
    print(type(model))
    return model
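network(size, labels) is defined elsewhere; below is a minimal tflearn sketch of the kind of convnet it could return. The layer sizes are arbitrary, and labels is assumed to be a list of class names; none of this is taken from the original project.

import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression

def network(size, labels):
    # Assumed architecture: two conv/pool stages, one dense layer, softmax output.
    net = input_data(shape=[None, size, size, 1], name='input')
    net = conv_2d(net, 32, 5, activation='relu')
    net = max_pool_2d(net, 2)
    net = conv_2d(net, 64, 5, activation='relu')
    net = max_pool_2d(net, 2)
    net = fully_connected(net, 512, activation='relu')
    net = dropout(net, 0.8)
    net = fully_connected(net, len(labels), activation='softmax')
    return regression(net, optimizer='adam', learning_rate=1e-3,
                      loss='categorical_crossentropy', name='targets')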
def fit_hopfield(params):
    # get params
    n_label = params.get('n_label', None)
    n_sample = params['n_sample']
    fit_mode = params['fit_mode']

    # load dataset
    dataset = load_alphabet()

    # target_names
    target_names = params.get('target_names', None)
    if target_names is None:
        target_names = dataset.target_names[:n_label]

    # transform data
    dataset.data = binarize(dataset.data, binary_values=(-1, 1))

    # create train data
    X, y = create_train_data(data=dataset.data,
                             target=dataset.target,
                             target_names=target_names,
                             n_sample=n_sample)
    print_train_data(X, y, target_names)

    # fit hopfield
    hf = Hopfield(mode=fit_mode)
    hf.fit(X, y, watch_weight=False)

    # set params
    params['img_shape'] = dataset.image_shape

    return hf, X, y, target_names, params
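A usage sketch: the dictionary keys below are exactly the ones fit_hopfield reads; the concrete values (and the 'hebbian' mode string) are only an example, not values confirmed by the original code.

# Example parameter set; values are illustrative.
params = {
    'n_label': 4,            # use the first 4 alphabet classes
    'n_sample': 2,           # samples per class
    'fit_mode': 'hebbian',   # assumed value accepted by Hopfield(mode=...)
}
hf, X, y, target_names, params = fit_hopfield(params)
print(target_names, params['img_shape'])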
...

Lab3

Improve the accuracy of Lab2.

Tips:
- Try to change the training parameters: learning rate, number of epochs, batch size, etc.
- Try to augment the image samples using the last example of https://keras.io/preprocessing/image/#imagedatagenerator (see the sketch after the imports below).
- Try to replace the up-pooling layer with a deconvolution layer (ref. https://github.com/k3nt0w/FCN_via_keras).
- Try to increase the receptive field by replacing Convolution2D with AtrousConvolution2D (is this the same as reducing the running resolution?).
- Transfer learning.

from data import create_train_data, create_test_data
create_train_data()
create_test_data()

from __future__ import print_function

from scipy import misc
import numpy as np
from keras.models import Model
from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D, AtrousConvolution2D
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K
import os

from data import load_train_data, load_test_data

K.set_image_dim_ordering('th')  # Theano dimension ordering in this code

original_img_rows = 1024
original_img_cols = 1024
running_img_rows = 256
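For the augmentation tip, a minimal sketch along the lines of the linked ImageDataGenerator example (paired image/mask generators with a shared seed), written against the Keras 1.x API that the imports above use; argument names such as samples_per_epoch and nb_epoch changed in Keras 2. model, imgs_train and imgs_mask_train are assumed to come from the surrounding script.

from keras.preprocessing.image import ImageDataGenerator

# Identical generator settings and seed keep image and mask transformations in sync.
data_gen_args = dict(rotation_range=10.,
                     width_shift_range=0.05,
                     height_shift_range=0.05,
                     zoom_range=0.1,
                     horizontal_flip=True,
                     fill_mode='nearest')
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)

seed = 1
image_generator = image_datagen.flow(imgs_train, batch_size=32, seed=seed)
mask_generator = mask_datagen.flow(imgs_mask_train, batch_size=32, seed=seed)
train_generator = zip(image_generator, mask_generator)

model.fit_generator(train_generator,
                    samples_per_epoch=len(imgs_train),
                    nb_epoch=20)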
        epochs=netpram.nb_epoch,
        max_queue_size=50,
        workers=4,
        verbose=1,
        callbacks=callbacks,
        validation_data=valid_generator(imgs_test, imgs_mask_test, netprameval),
        validation_steps=np.ceil(float(len(imgs_test)) / float(netpram.batch_size)))


if __name__ == "__main__":
    netparam = params.init()
    netparameval = params.init(train=0)
    x = [[1, 3], [2, 5], [4, 8], [6, 7]]
    data.create_train_data(netparam)
    data.create_test_data(netparam)
    for indx in range(0, 4):
        imgs_train, imgs_mask_train = data.load_train_data()
        imgs_test, imgs_mask_test = data.load_test_data()
        np.random.seed(1234)
        Traindatagen = CustomImageDataGenerator(netparam, training=1)
        Validdatagen = CustomImageDataGenerator(netparam, training=1)
        d1 = str(x[indx][0])
        d2 = str(x[indx][1])
        ids_train = [
            i for i, s in enumerate(imgs_mask_train)
            if 'instrument_dataset_' + d1 not in s
            and 'instrument_dataset_' + d2 not in s
        ]
        ids_val = [
            i for i, s in enumerate(imgs_mask_test)
            if 'instrument_dataset_' +
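The pairs in x define a 4-fold split that holds out two instrument datasets per fold, selected by filtering the mask file paths. A small illustration of that filter for the first fold, using made-up paths:

# Illustrative only: how the filter above partitions mask paths for fold [1, 3].
masks = ['instrument_dataset_1/a.png', 'instrument_dataset_2/b.png',
         'instrument_dataset_3/c.png', 'instrument_dataset_4/d.png']
held_out = ('1', '3')
train_ids = [i for i, s in enumerate(masks)
             if all('instrument_dataset_' + d not in s for d in held_out)]
print(train_ids)  # [1, 3] -> datasets 2 and 4 remain for training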
def AugmentDS(self, Augentries):
    datagen = ImageDataGenerator(
        # featurewise_center=False,
        # featurewise_std_normalization=False,
        # samplewise_center=False,
        # samplewise_std_normalization=False,
        rescale=None,
        rotation_range=3,
        width_shift_range=0.08,
        height_shift_range=0.08,
        shear_range=0.07,
        zoom_range=0.07,
        horizontal_flip=True,
        vertical_flip=True,
        fill_mode='constant',
        cval=0.)
    args = self.arggen(Augentries)  # trimgs, labimgs, data_path, num
    data_path, fext, num, cr = args[0], args[1], int(args[2]), float(args[3])
    create_train_data(data_path, train_p, label_p, fext)
    imgs = load_train_data(data_path, train_npy, labels_npy)
    nLabels = np.max(imgs[1])
    trimgs, labimgs = imgs[0], imgs[1]
    imgshape = trimgs[0].shape
    print('-' * 30)
    print('Augmenting train and labels dataset: ', num, 'replica per image...')
    print('-' * 30)
    # seed = np.random.randint(10000)
    seed = np.random.randint(10000, size=2 * len(trimgs) * num)
    tmpf = 'tmp'
    if tmpf in sorted(os.listdir(os.path.join(data_path, train_p))):
        shutil.rmtree(os.path.join(data_path, train_p, tmpf), ignore_errors=True)
        shutil.rmtree(os.path.join(data_path, label_p, tmpf), ignore_errors=True)
    os.makedirs(os.path.join(data_path, train_p, tmpf))
    os.makedirs(os.path.join(data_path, label_p, tmpf))
    global batchdata
    batchdata = []
    j = 0
    for x in trimgs:
        x[x == 0] = 1
        x = x.reshape((1, ) + x.shape + (1, ))
        # the .flow() call below generates batches of randomly transformed images,
        # which are written out to the temporary train folder via save_tif
        i = 0
        for batch in datagen.flow(x, batch_size=1, seed=seed[j]):
            self.save_tif(data_path, os.path.join(train_p, tmpf), 'img',
                          batch[0, :, :, 0].astype('uint8'),
                          seed[i + j * 2 * num], fext)
            i += 1
            if i >= 2 * num:
                break  # otherwise the generator would loop indefinitely
        j += 1
    j = 0
    for y in labimgs:
        y = y.reshape((1, ) + y.shape + (1, ))
        i = 0
        for batch in datagen.flow(y, batch_size=1, seed=seed[j]):
            self.save_tif(data_path, os.path.join(label_p, tmpf), 'img',
                          batch[0, :, :, 0].astype('uint8'),
                          seed[i + j * 2 * num], fext)
            batchdata.append(batch[0, :, :, 0])
            i += 1
            if i >= 2 * num:
                break  # otherwise the generator would loop indefinitely
        j += 1
    create_train_data(data_path, os.path.join(train_p, tmpf),
                      os.path.join(label_p, tmpf), fext)
    tmpimgs = load_train_data(data_path, train_npy, labels_npy)
    tmptr = tmpimgs[0]
    tmplab = tmpimgs[1]
    print(imgshape, cr)
    lencrop = (int(((imgshape[0] * cr) // 16) * 16),
               int(((imgshape[1] * cr) // 16) * 16))
    print(lencrop)
    delta = imgshape[0] - lencrop[0], imgshape[1] - lencrop[1]
    print(delta)
    seltr = []
    sellab = []
    j = 0
    for i, img in enumerate(tmptr):
        tmpres = crop_no_black(tmptr[i], tmplab[i], lencrop)
        if tmpres is not None:
            seltr.append(tmpres[0])
            sellab.append(tmpres[1])
            j += 1
        if j > len(trimgs) * (num + 1):
            break
    seltr = np.array(seltr)
    sellab = np.array(sellab)
    print(seltr.shape, sellab.shape)
    np.save(os.path.join(data_path, 'imgs_train.npy'), seltr)
    print('Augmented train data saved to:',
          os.path.join(data_path, 'imgs_train.npy'))
    np.save(os.path.join(data_path, 'imgs_labels.npy'), sellab)
    print('Augmented label data saved to:',
          os.path.join(data_path, 'imgs_labels.npy'))
    if selfold in sorted(os.listdir(os.path.join(data_path, train_p))):
        shutil.rmtree(os.path.join(data_path, train_p, selfold), ignore_errors=True)
        shutil.rmtree(os.path.join(data_path, label_p, selfold), ignore_errors=True)
    os.makedirs(os.path.join(data_path, train_p, selfold))
    os.makedirs(os.path.join(data_path, label_p, selfold))
    for i in range(len(seltr)):
        self.save_tif(data_path, os.path.join(train_p, selfold), 'img',
                      seltr[i], i, fext)
        self.save_tif(data_path, os.path.join(label_p, selfold), 'img',
                      sellab[i], i, fext)
    # create_train_data(data_path, train_p, label_p, fext)
    if tmpf in sorted(os.listdir(os.path.join(data_path, train_p))):
        shutil.rmtree(os.path.join(data_path, train_p, tmpf), ignore_errors=True)
        shutil.rmtree(os.path.join(data_path, label_p, tmpf), ignore_errors=True)
    print('Done')
    return
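The key design choice in AugmentDS is that training images and label masks are augmented in two separate loops but with the same per-image seed (seed[j]), so each image and its mask receive the same random transform. A stripped-down sketch of that pairing; img and mask are hypothetical 2-D uint8 arrays of equal shape, and datagen is the ImageDataGenerator configured above.

# Minimal sketch of the same-seed pairing used above (illustrative data).
img_batch = img.reshape((1,) + img.shape + (1,))
mask_batch = mask.reshape((1,) + mask.shape + (1,))
pair_seed = 42
aug_img = next(datagen.flow(img_batch, batch_size=1, seed=pair_seed))[0, :, :, 0]
aug_mask = next(datagen.flow(mask_batch, batch_size=1, seed=pair_seed))[0, :, :, 0]
# Because both flows were seeded identically, aug_img and aug_mask went through
# the same rotation/shift/flip, so the label stays aligned with the image.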