def read_test_data(IMG_WIDTH=256, IMG_HEIGHT=256, IMG_CHANNELS=3):
    """Load test images resized to (IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS).

    Results are cached to 'test_img.npy' / 'test_sizes.npy' and reused on
    subsequent calls.

    Returns:
        X_test: uint8 array (num_test, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS).
        test_sizes: original [height, width] of each image before resizing.
    """
    X_test = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
    test_sizes = []
    print('\nGetting and resizing test images ... ')
    sys.stdout.flush()
    # Reuse cached arrays from a previous run if present.
    if os.path.isfile('test_img.npy') and os.path.isfile('test_sizes.npy'):
        print('Test data loaded from memory')
        X_test = np.load('test_img.npy')
        test_sizes = np.load('test_sizes.npy')
        return X_test, test_sizes
    pbar = Progbar(len(test_ids))
    for img_num, id_ in enumerate(test_ids):
        path = os.path.join(TEST_PATH, id_)
        img = imread(path + '/images/' + id_ + '.png')
        if len(img.shape) > 2:
            # FIX: keep the first IMG_CHANNELS channels. The original
            # `img[:, :, IMG_CHANNELS]` (missing colon) indexed a single
            # channel plane -- out of range for RGB with IMG_CHANNELS=3.
            img = img[:, :, :IMG_CHANNELS]
        else:
            # Grayscale image: replicate to 3 channels.
            img = np.stack((img,) * 3, -1)
        test_sizes.append([img.shape[0], img.shape[1]])
        img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant',
                     preserve_range=True, anti_aliasing=antialias_flag)
        X_test[img_num] = img
        pbar.update(img_num)
    np.save('test_img', X_test)
    np.save('test_sizes', test_sizes)
    return X_test, test_sizes
def read_test_data(IMG_WIDTH=256, IMG_HEIGHT=256, IMG_CHANNELS=3):
    """Load test images with OpenCV and resize them to the network input size.

    Returns:
        X_test: uint8 array (num_test, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS).
        sizes_test: original [height, width] of each image before resizing.
    """
    X_test = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
    sizes_test = []
    print('\nGetting and resizing test images ... ')
    sys.stdout.flush()
    b = Progbar(len(test_ids))
    for i, id_ in enumerate(test_ids):
        path = TEST_PATH + id_
        # NOTE(review): cv2.imread returns None for an unreadable path and
        # this is not checked -- a missing file would crash at img.shape
        # below. Confirm test_ids always names readable files.
        img = cv2.imread(path)
        sizes_test.append([img.shape[0], img.shape[1]])
        img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
        X_test[i] = img
        b.update(i)
    return X_test, sizes_test
def read_train_data(IMG_WIDTH=256, IMG_HEIGHT=256, IMG_CHANNELS=3, f_hsv=1):
    """Load training images and their combined (unioned) masks, with caching.

    Args:
        IMG_WIDTH, IMG_HEIGHT, IMG_CHANNELS: target image dimensions.
        f_hsv: when 1, convert each image from RGB to HSV before resizing.

    Returns:
        X_train: uint8 array (num_train, H, W, C).
        Y_train: bool array (num_train, H, W, 1) -- union of per-object masks.
    """
    X_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
    # FIX: np.bool was removed in NumPy 1.24 -- use the builtin bool dtype.
    Y_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=bool)
    print('Getting and resizing train images and masks ... ')
    sys.stdout.flush()
    # NOTE(review): the cache files do not record f_hsv, so cached data may
    # have been built with a different color space -- confirm before relying
    # on the cache when toggling f_hsv.
    if os.path.isfile("train_img.npy") and os.path.isfile("train_mask.npy"):
        print("Train file loaded from memory")
        X_train = np.load("train_img.npy")
        Y_train = np.load("train_mask.npy")
        return X_train, Y_train
    a = Progbar(len(train_ids))
    for n, id_ in enumerate(train_ids):
        path = TRAIN_PATH + id_
        img = imread(path + '/images/' + id_ + '.png')[:, :, :IMG_CHANNELS]
        if f_hsv == 1:
            img = color.rgb2hsv(img)
        img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
        X_train[n] = img
        mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=bool)
        # Union of every per-object mask file for this image.
        for mask_file in next(os.walk(path + '/masks/'))[2]:
            mask_ = imread(path + '/masks/' + mask_file)
            mask_ = np.expand_dims(resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant',
                                          preserve_range=True), axis=-1)
            mask = np.maximum(mask, mask_)
        Y_train[n] = mask
        a.update(n)
    np.save("train_img", X_train)
    np.save("train_mask", Y_train)
    return X_train, Y_train
def read_train_data(IMG_WIDTH=256, IMG_HEIGHT=256, IMG_CHANNELS=3):
    """Load training images from TRAIN_PATH/images and grayscale masks from MASK_PATH.

    Masks are matched to images by substring: every MASK_PATH/*.jpg whose
    filename contains the image id is resized and assigned (last match wins).

    Returns:
        X_train: uint8 array (num_train, H, W, C).
        Y_train: bool array (num_train, H, W, 1).
    """
    X_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
    # FIX: np.bool was removed in NumPy 1.24 -- use the builtin bool dtype.
    Y_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=bool)
    print('Getting and resizing train images and masks ... ')
    sys.stdout.flush()
    a = Progbar(len(train_ids))
    for n, id_ in enumerate(train_ids):
        path = TRAIN_PATH + 'images/'
        img = imread(path + id_)[:, :, :IMG_CHANNELS]
        img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
        X_train[n] = img
        for mask_file in glob.glob(MASK_PATH + "*.jpg"):
            if (id_ in mask_file):
                mask_ = imread(mask_file)
                mask_ = skimage.color.rgb2gray(mask_)
                mask_ = np.expand_dims(resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant',
                                              preserve_range=True), axis=-1)
                # NOTE(review): unlike the other loaders, matching masks are
                # assigned rather than unioned (np.maximum was commented out
                # in the original) -- confirm this is intended.
                Y_train[n] = mask_
        a.update(n)
    np.save("train_img", X_train)
    np.save("train_mask", Y_train)
    return X_train, Y_train
def test_data(self):
    """Populate self.X_test / self.test_sizes with resized test images.

    Loads cached arrays from 'test_img.npy' / 'test_sizes.npy' when present;
    otherwise reads each image from settings.TEST_PATH, records its original
    size, resizes it, and saves the caches. Mutates instance state in place
    and returns None.
    """
    print('\nGetting and resizing test images ... ')
    sys.stdout.flush()
    # Fast path: reuse the on-disk cache from a previous run.
    if os.path.isfile('test_img.npy') and os.path.isfile('test_sizes.npy'):
        print('Test data loaded from memory')
        self.X_test = np.load('test_img.npy')
        self.test_sizes = np.load('test_sizes.npy')
        return
    pbar = Progbar(len(test_ids))
    for img_num, id_ in enumerate(test_ids):
        path = os.path.join(settings.TEST_PATH, id_)
        img = imread(path + '/images/' + id_ + '.png')
        if len(img.shape) > 2:
            # Multi-channel image: keep the first img_channels channels.
            img = img[:, :, :self.img_channels]
        else:
            # Grayscale image: replicate to 3 channels.
            img = np.stack((img, ) * 3, -1)
        # Remember the original size so predictions can be upsampled later.
        self.test_sizes.append([img.shape[0], img.shape[1]])
        img = resize(img, (self.img_height, self.img_width), mode='constant',
                     preserve_range=True, anti_aliasing=antialias_flag)
        self.X_test[img_num] = img
        pbar.update(img_num)
    np.save('test_img', self.X_test)
    np.save('test_sizes', self.test_sizes)
def load_weights_from_tf_checkpoint(model, checkpoint_file):
    """Copy weights from a TensorFlow checkpoint into a Keras model, layer by layer.

    Tensors are looked up by the Keras layer name, assuming the checkpoint
    uses the '<layer_name>/<variable>' naming scheme. Only SeparableConv2D,
    Conv2D, BatchNormalization and Dense layers are populated; all other
    layers are skipped.
    """
    print('Load weights from tensorflow checkpoint')
    progbar = Progbar(target=len(model.layers))
    reader = tf.train.NewCheckpointReader(checkpoint_file)
    for index, layer in enumerate(model.layers):
        progbar.update(current=index)
        # SeparableConv2D must be tested before Convolution2D: it is a
        # subclass and would otherwise match the generic conv branch.
        if isinstance(layer, layers.convolutional.SeparableConv2D):
            depthwise = reader.get_tensor('{}/depthwise_weights'.format(layer.name))
            pointwise = reader.get_tensor('{}/pointwise_weights'.format(layer.name))
            layer.set_weights([depthwise, pointwise])
        elif isinstance(layer, layers.convolutional.Convolution2D):
            weights = reader.get_tensor('{}/weights'.format(layer.name))
            layer.set_weights([weights])
        elif isinstance(layer, layers.BatchNormalization):
            beta = reader.get_tensor('{}/beta'.format(layer.name))
            gamma = reader.get_tensor('{}/gamma'.format(layer.name))
            moving_mean = reader.get_tensor('{}/moving_mean'.format(layer.name))
            moving_variance = reader.get_tensor('{}/moving_variance'.format(layer.name))
            # Keras expects [gamma, beta, mean, variance] in this order.
            layer.set_weights([gamma, beta, moving_mean, moving_variance])
        elif isinstance(layer, layers.Dense):
            weights = reader.get_tensor('{}/weights'.format(layer.name))
            biases = reader.get_tensor('{}/biases'.format(layer.name))
            # NOTE(review): the first output column/bias is dropped --
            # presumably the TF checkpoint's background class; confirm against
            # the checkpoint's class layout.
            layer.set_weights([weights[:, 1:], biases[1:]])
def read_test_data(IMG_WIDTH=256, IMG_HEIGHT=256, IMG_CHANNELS=3):
    """Load stage-2 test images resized to the network input size, with caching.

    Returns:
        X_test: uint8 array (num_test, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS).
        sizes_test: original [height, width] of each image before resizing.
    """
    X_test = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
    sizes_test = []
    print('\nGetting and resizing test images ... ')
    sys.stdout.flush()
    if os.path.isfile("test_img.npy") and os.path.isfile("test_size.npy"):
        print("Test file loaded from memory")
        X_test = np.load("test_img.npy")
        sizes_test = np.load("test_size.npy")
        return X_test, sizes_test
    b = Progbar(len(test_ids))
    for n, id_ in enumerate(test_ids):
        path = TEST2_PATH + id_
        # FIX: the original caught the IndexError raised when slicing a 2-D
        # (grayscale) image and fell through, reusing a stale `img` from the
        # previous iteration (NameError on the first). Read the image first,
        # promote grayscale to IMG_CHANNELS channels, then slice.
        img = imread(str(path) + r'/images/' + str(id_) + r'.png')
        if img.ndim == 2:
            img = np.stack((img,) * IMG_CHANNELS, axis=-1)
        img = img[:, :, :IMG_CHANNELS]
        sizes_test.append([img.shape[0], img.shape[1]])
        img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
        X_test[n] = img
        b.update(n)
    np.save("test_img", X_test)
    np.save("test_size", sizes_test)
    return X_test, sizes_test
def __invert(self):
    """Return pixel-inverted copies of the training images, cached on disk."""
    source = self.X_in
    total = source.shape[0]
    print('\nInverting train data ... ')
    sys.stdout.flush()
    # Serve a previous run's result when the cache file exists.
    if os.path.isfile('inverted_imgs.npy'):
        print('Inverted data loaded from memory')
        return np.load('inverted_imgs.npy')
    progress = Progbar(total)
    collected = []
    for position in range(total):
        collected.append(invert(source[position]))
        progress.update(position)
    collected = np.array(collected)
    np.save("inverted_imgs", collected)
    return collected
def train(self, train_dir, val_dir, epochs=10, batch_size=64):
    """Adversarially train the critic and generator from directory data.

    Builds rescaled (1/255) image generators over `train_dir` / `val_dir`,
    then for each epoch alternates one critic and one generator update per
    batch, reconstructing validation samples at the start of every epoch.
    Saves encoder/decoder/critic weights to the working directory at the end.
    """
    # Generators
    color_mode = 'rgb' if self.input_shape[-1] > 1 else 'grayscale'
    datagen = ImageDataGenerator(rescale=1. / 255, fill_mode='constant')
    train_gen = datagen.flow_from_directory(
        train_dir,
        target_size=self.input_shape[:2],
        interpolation='bilinear',
        color_mode=color_mode,
        class_mode='categorical',
        batch_size=batch_size)
    val_gen = datagen.flow_from_directory(val_dir,
                                          target_size=self.input_shape[:2],
                                          interpolation='bilinear',
                                          color_mode=color_mode,
                                          class_mode='categorical',
                                          batch_size=batch_size)
    steps_per_epoch = (np.ceil(train_gen.n / batch_size)).astype('int')
    for i in range(epochs):
        print('Epoch %i/%i' % (i + 1, epochs))
        pbar = Progbar(steps_per_epoch)
        # Dump reconstructions of validation samples before each epoch.
        self._reconstruct_samples(val_gen, i)
        for j in range(steps_per_epoch):
            x, _ = train_gen.next()
            # Both trainers build their own targets internally (y=None).
            critic_loss = self.critic_trainer.train_on_batch(x=x, y=None)
            gen_loss = self.gen_trainer.train_on_batch(x=x, y=None)
            pbar.update(j + 1, [('critic loss', critic_loss),
                                ('generator loss', gen_loss)])
    # Save weights
    self.encoder.save_weights('./encoder.h5')
    self.decoder.save_weights('./decoder.h5')
    self.critic.save_weights('./critic.h5')
def __add_noise(self):
    """Return noise-augmented copies of the training images, cached on disk."""
    source = self.X_in
    total = source.shape[0]
    print('\nAdding noise on train data ... ')
    sys.stdout.flush()
    # Serve a previous run's result when the cache file exists.
    if os.path.isfile('noisy_imgs.npy'):
        print('Noisy data loaded from memory')
        return np.load('noisy_imgs.npy')
    progress = Progbar(total)
    collected = []
    for position in range(total):
        collected.append(random_noise(source[position]))
        progress.update(position)
    collected = np.array(collected)
    np.save("noisy_imgs", collected)
    return collected
def read_images(file_ids):
    """Read images from dir_src and return them at two fixed resolutions.

    Returns:
        CNN_list: uint8 (N, 128, 128, 3) -- network-input-sized images.
        im_sizes: list of original [height, width] per image.
        mask_list: uint8 (N, 358, 352, 3) -- the same images at mask resolution.
    """
    import numpy as np
    from skimage.transform import resize
    from keras.utils import Progbar
    from skimage.io import imread
    CNN_list = np.zeros((len(file_ids), 128, 128, 3), dtype=np.uint8)
    mask_list = np.zeros((len(file_ids), 358, 352, 3), dtype=np.uint8)
    im_sizes = []
    print('\nGetting and resizing images ... ')
    b = Progbar(len(file_ids))
    for n, id_ in enumerate(file_ids):
        path = dir_src + id_
        img = imread(path)[:, :, :3]  # keep the first 3 channels (drop alpha)
        im_sizes.append([img.shape[0], img.shape[1]])
        img_net = resize(img, (128, 128), mode='constant', preserve_range=True)
        CNN_list[n] = img_net  # holds images for CNN
        img_mask = resize(img, (358, 352), mode='constant', preserve_range=True)
        mask_list[n] = img_mask  # same image at mask resolution (presumably for mask output -- confirm)
        b.update(n)
    return CNN_list, im_sizes, mask_list
def read_test_data(IMG_WIDTH=256, IMG_HEIGHT=256, IMG_CHANNELS=3):
    """Load and resize the test images, caching the arrays under INPUT_PATH."""
    images = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
    original_sizes = []
    print('\nGetting and resizing test images ... ')
    sys.stdout.flush()
    cached_img = INPUT_PATH + "test_img.npy"
    cached_size = INPUT_PATH + "test_size.npy"
    # Short-circuit: reuse the cached arrays when both files exist.
    if os.path.isfile(cached_img) and os.path.isfile(cached_size):
        print("Test file loaded from memory")
        return np.load(cached_img), np.load(cached_size)
    progress = Progbar(len(test_ids))
    for index, image_id in enumerate(test_ids):
        image_dir = TEST_PATH + image_id
        picture = imread(image_dir + '/images/' + image_id + '.png')
        # Promote grayscale images to RGB before channel slicing.
        if len(picture.shape) == 2:
            picture = skimage.color.gray2rgb(picture)
        picture = picture[:, :, :IMG_CHANNELS]
        original_sizes.append([picture.shape[0], picture.shape[1]])
        picture = resize(picture, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
        images[index] = picture
        progress.update(index)
    np.save(INPUT_PATH + "test_img", images)
    np.save(INPUT_PATH + "test_size", original_sizes)
    return images, original_sizes
def dev_evaluate(self, model, data):
    """Evaluate `model` on dev `data` and print its chunk-level F1 score.

    Each sample is (tokens, casing, char, labels); predictions and gold
    labels are mapped through self.idx2Label and scored with ChunkF1.
    """
    correctLabels = []
    predLabels = []
    b = Progbar(len(data))
    for i, data1 in enumerate(data):
        tokens, casing, char, labels = data1
        # Wrap each sequence in a batch dimension of 1.
        tokens = np.asarray([tokens])
        casing = np.asarray([casing])
        char = np.asarray([char])
        pred = model.predict([tokens, casing, char], verbose=False)[0]
        pred = pred.argmax(axis=-1)  # Predict the classes
        correctLabels.append(labels)
        predLabels.append(pred)
        b.update(i)
    # Finalize the progress bar.
    # NOTE(review): this reuses the loop variable `i`, so it raises NameError
    # when `data` is empty -- confirm callers never pass an empty dev set.
    b.update(i + 1)
    label_pred = []
    for sentence in predLabels:
        label_pred.append(
            [self.idx2Label[element] for element in sentence])
    label_correct = []
    for sentence in correctLabels:
        label_correct.append(
            [self.idx2Label[element] for element in sentence])
    acc = ChunkF1()
    for pred, label in zip(label_pred, label_correct):
        acc.update(pred, label)
    # acc.get() returns (name, value); print the F1 value.
    print(float(acc.get()[1]))
def train(self, trn_data: List[Tuple[List[str], List[str]]],
          dev_data: List[Tuple[List[str], List[str]]], *args, **kwargs):
    """
    Trains the model.

    Runs 80 epochs of mini-batch training over self.train_batch, then
    pickles the embeddings and index mappings to `resource_dir`/pickle.
    Note: `trn_data` / `dev_data` are not read here -- the batches come from
    instance state prepared earlier.

    :param trn_data: the training data.
    :param dev_data: the development data.
    :param args:
    :param kwargs:
    :return:
    """
    self.model = self.get_model()
    epochs = 80
    for epoch in range(epochs):
        print("Epoch %d/%d" % (epoch, epochs))
        a = Progbar(len(self.train_batch_len))
        for i, batch in enumerate(
                self.get_mini_batch(self.train_batch, self.train_batch_len)):
            labels, tokens, casing, char = batch
            self.model.train_on_batch([tokens, casing, char], labels)
            a.update(i)
        # Finalize the progress bar for this epoch.
        a.update(i + 1)
        print(' ')
    # model.save("hw3-model")
    # NOTE(review): self.word_idx appears twice in this list -- one entry is
    # probably meant to be a different mapping; confirm against the loader
    # that unpickles this file.
    save_data = [
        self.word_embeddings, self.case_embeddings, self.idx2Label,
        self.word_idx, self.word_idx, self.label_idx, self.case_idx,
        self.char_idx
    ]
    with open(os.path.join(resource_dir, 'pickle'), 'wb') as handle:
        pickle.dump(save_data, handle)
def read_train_data(IMG_WIDTH=256, IMG_HEIGHT=256, IMG_CHANNELS=1):
    """Load 'w1'-channel training images and 'w2'-channel label images, with caching.

    Returns:
        X_train: uint8 array (num_train, H, W, IMG_CHANNELS).
        Y_train: bool-derived array; the label stack is replicated 16x at the end.
    """
    X_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
    # FIX: np.bool was removed in NumPy 1.24 -- use the builtin bool dtype.
    Y_train = np.zeros((len(label_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=bool)
    print('Getting and resizing train images and masks ... ')
    sys.stdout.flush()
    if os.path.isfile("train_img.npy") and os.path.isfile("train_mask.npy"):
        print("Train file loaded from memory")
        X_train = np.load("train_img.npy")
        Y_train = np.load("train_mask.npy")
        return X_train, Y_train
    print('Numpy file for Train Images')
    a = Progbar(len(train_ids))
    for n, id_ in enumerate(train_ids):
        if 'w1' in id_:
            path = TRAIN_PATH
            img = imread(path + id_)  # [:,:,:IMG_CHANNELS]
            img = resize(img, (IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), mode='constant',
                         preserve_range=True)
            X_train[n] = img
            a.update(n)
    print('Numpy file for Train Mask')
    b = Progbar(len(label_ids))
    for n, id_ in enumerate(label_ids):
        if 'w2' in id_:
            path = LABEL_PATH
            img = imread(path + id_)
            # FIX: removed a dead `np.expand_dims(resize(...), axis=-1)`
            # statement whose result was discarded.
            img = resize(img, (IMG_HEIGHT, IMG_WIDTH, 1), mode='constant',
                         preserve_range=True)
            Y_train[n] = img
            b.update(n)
    print('-30', len(Y_train))
    # Replicate labels 16x -- presumably to match an augmented image set;
    # confirm against the augmentation pipeline.
    Y_train = np.array(list(Y_train) * 16)
    print('@#@', X_train.shape, Y_train.shape)
    np.save("train_img_w2", X_train)
    np.save("train_mask_w2", Y_train)
    return X_train, Y_train
def mask_to_rle(preds_test_upsampled):
    """Convert upsampled test predictions to run-length encodings.

    Returns a pair (ids, rles) where ids repeats each test id once per RLE
    produced for that image, keeping the two lists aligned for submission.
    """
    ids_out = []
    encodings = []
    progress = Progbar(len(test_ids))
    for index, image_id in enumerate(test_ids):
        image_runs = list(prob_to_rles(preds_test_upsampled[index]))
        for run in image_runs:
            encodings.append(run)
            ids_out.append(image_id)
        progress.update(index)
    return ids_out, encodings
def read_train_data(IMG_WIDTH=256, IMG_HEIGHT=256, IMG_CHANNELS=3):
    """Load training images and unioned masks, skipping known-bad ids, with caching.

    Returns:
        X_train: uint8 array (num_train, H, W, C).
        Y_train: bool array (num_train, H, W, 1); rows for skipped ids stay zero.
    """
    # Images with known data problems; they are skipped during loading
    # (their rows in X_train / Y_train remain all-zero).
    problem_ids = [
        '7b38c9173ebe69b4c6ba7e703c0c27f39305d9b2910f46405993d2ea7a963b80',
        'b1eb0123fe2d8c825694b193efb7b923d95effac9558ee4eaf3116374c2c94fe',
        '9bb6e39d5f4415bc7554842ee5d1280403a602f2ba56122b87f453a62d37c06e',
        '1f0008060150b5b93084ae2e4dabd160ab80a95ce8071a321b80ec4e33b58aca',
        '58c593bcb98386e7fd42a1d34e291db93477624b164e83ab2afa3caa90d1d921',
        'adc315bd40d699fd4e4effbcce81cd7162851007f485d754ad3b0472f73a86df',
        '12aeefb1b522b283819b12e4cfaf6b13c1264c0aadac3412b4edd2ace304cb40',
        '0a7d30b252359a10fd298b638b90cb9ada3acced4e0c0e5a3692013f432ee4e9',
    ]
    X_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
    # FIX: np.bool was removed in NumPy 1.24 -- use the builtin bool dtype.
    Y_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=bool)
    print('Getting and resizing train images and masks ... ')
    sys.stdout.flush()
    if os.path.isfile("train_img.npy") and os.path.isfile("train_mask.npy"):
        print("Train file loaded from memory")
        X_train = np.load("train_img.npy")
        Y_train = np.load("train_mask.npy")
        return X_train, Y_train
    a = Progbar(len(train_ids))
    for n, id_ in enumerate(train_ids):
        if id_ in problem_ids:
            print('skipped')
            continue
        path = TRAIN_PATH + id_
        img = imread(path + '/images/' + id_ + '.png')[:, :, :IMG_CHANNELS]
        img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
        X_train[n] = img
        mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=bool)
        # Union of every per-nucleus mask file for this image.
        for mask_file in next(os.walk(path + '/masks/'))[2]:
            mask_ = imread(path + '/masks/' + mask_file)
            mask_ = np.expand_dims(resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant',
                                          preserve_range=True), axis=-1)
            mask = np.maximum(mask, mask_)
        Y_train[n] = mask
        a.update(n)
    np.save("train_img", X_train)
    np.save("train_mask", Y_train)
    return X_train, Y_train
def run_epoch(self, session, dataset, train_dir):
    """Run one training epoch over `dataset`, checkpointing every 1000 batches.

    Optimizes on each mini-batch, reports the loss on a progress bar, saves
    the model to `train_dir`/mymodel every 1000 steps, and evaluates at the
    end of the epoch.
    """
    num_samples = len(dataset['context'])
    num_batches = num_samples // self.FLAG.batch_size + 1
    progress = Progbar(target=num_batches)
    # FIX: build the Saver once. Constructing tf.train.Saver() inside the
    # loop adds a fresh set of save ops to the graph at every checkpoint.
    saver = tf.train.Saver()
    for i, train_batch in enumerate(
            batches(dataset, is_train=True, batch_size=self.FLAG.batch_size)):
        loss_ = self.optimize(session, train_batch)
        progress.update(i + 1, [('loss', loss_)])
        if i % 1000 == 0:
            saver.save(session, train_dir + "/mymodel")
    self.evaluate_answer(session, dataset, log=True)
def train(G, D, combined, epochs=50):
    """Train an InfoGAN on MNIST.

    Per batch: the discriminator D is trained on a real batch and a generated
    batch, then the combined model updates the generator G (with the
    categorical and continuous latent codes as auxiliary targets) on a
    double-sized noise batch. Sample images are saved after every epoch.
    """
    # Load and normalize data:
    (x_train, _), (x_test, _) = mnist.load_data()
    # Use train + test together and scale pixels to [-1, 1] (tanh range).
    x_train = np.concatenate((x_train, x_test), axis=0)
    x_train = (x_train.astype(np.float32) - 127.5) / 127.5
    x_train = x_train.reshape((-1, IMG_ROWS, IMG_COLS, CHANNELS))
    # Number of batch loops:
    batch_loops = int(NUM_IMGS // BATCH_SIZE)
    # Train InfoGAN:
    for epoch in range(epochs):
        # Reshuffle real images every epoch.
        shuffle_idx = np.random.permutation(NUM_IMGS)
        real_imgs = x_train[shuffle_idx]
        progress_bar = Progbar(target=batch_loops)
        for batch_i in range(batch_loops):
            progress_bar.update(batch_i)
            # Discriminator:
            real_img_batch = real_imgs[batch_i * BATCH_SIZE:(batch_i + 1) * BATCH_SIZE]
            noise_batch = np.random.normal(0, 1, (BATCH_SIZE, NOISE_SIZE))
            c_cat_batch = np.random.randint(0, C_CAT_SIZE, BATCH_SIZE)
            c_cat_batch = to_categorical(c_cat_batch, num_classes=C_CAT_SIZE)
            c_cont_batch = np.random.uniform(-1, 1, (BATCH_SIZE, C_CONT_NUM))
            fake_img_batch = G.predict(
                [noise_batch, c_cat_batch, c_cont_batch])
            # Real images labeled 1, fakes labeled 0; average the two losses.
            d_loss_real = D.train_on_batch(real_img_batch, np.ones(BATCH_SIZE))
            d_loss_fake = D.train_on_batch(fake_img_batch, np.zeros(BATCH_SIZE))
            d_loss_total = np.add(d_loss_real, d_loss_fake) * 0.5
            # Generator:
            # Train on twice the batch size so G sees as many samples as D.
            noise_batch = np.random.normal(0, 1, (2 * BATCH_SIZE, NOISE_SIZE))
            c_cat_batch = np.random.randint(0, C_CAT_SIZE, 2 * BATCH_SIZE)
            c_cat_batch = to_categorical(c_cat_batch, num_classes=C_CAT_SIZE)
            c_cont_batch = np.random.uniform(-1, 1, (2 * BATCH_SIZE, C_CONT_NUM))
            # Targets: validity = 1, plus the latent codes for the mutual-
            # information (auxiliary) heads.
            g_loss = combined.train_on_batch(
                [noise_batch, c_cat_batch, c_cont_batch],
                [np.ones(2 * BATCH_SIZE), c_cat_batch, c_cont_batch])
            # print(d_loss_total)
            # print(g_loss)
        print(epoch)
        print(d_loss_total)
        print(g_loss)
        save_images(G, epoch)
def train_model(model):
    """Train `model` over the module-level mini-batches for `epochs` epochs."""
    for epoch_num in range(epochs):
        print("Epoch %d/%d" % (epoch_num, epochs))
        progress = Progbar(len(train_batch_len))
        for step, minibatch in enumerate(
                iterate_minibatches(train_batch, train_batch_len)):
            labels, tokens, casing, char = minibatch
            model.train_on_batch([tokens, casing, char], labels)
            progress.update(step)
        print(' ')
    return model
def __blur(self):
    """Gaussian-blur the training images and labels, caching the result as .npz.

    The bool label array is temporarily reinterpreted in place as uint8
    (same itemsize) so OpenCV can blur it, then flipped back to bool.

    Returns:
        [blur_imgs, blur_labels]: blurred uint8 images and bool labels.
    """
    imgs = self.X_in
    labels = self.Y_in
    num_imgs = imgs.shape[0]
    blur_imgs = []
    blur_labels = []
    print('\nBluring train data ... ')
    sys.stdout.flush()
    save_name = 'blur_imgs.npz'
    if os.path.isfile(save_name):
        print('blured data loaded from memory')
        blur = np.load(save_name)
        blur_imgs = blur['blur_imgs']
        blur_labels = blur['blur_labels']
        return [blur_imgs, blur_labels]
    # FIX: the dtype reinterpretation now happens after the cache check;
    # previously the cached-return path left self.Y_in viewed as uint8.
    labels.dtype = np.uint8
    pbar = Progbar(num_imgs)
    for idx in range(num_imgs):
        img = imgs[idx]
        label = labels[idx]
        # Kernel scales with image width; `| 1` forces an odd kernel size.
        sigma = img.shape[1] * 0.05 / 4
        blur_size = int(2 * sigma) | 1
        img = cv2.GaussianBlur(img, ksize=(blur_size, blur_size), sigmaX=sigma)
        label = cv2.GaussianBlur(label, ksize=(blur_size, blur_size), sigmaX=sigma)
        blur_imgs.append(img)
        blur_labels.append(label)
        pbar.update(idx)
    blur_imgs = np.array(blur_imgs)
    blur_imgs = img_as_ubyte(blur_imgs)
    blur_labels = np.array(blur_labels)
    blur_labels = np.expand_dims(blur_labels, axis=-1)
    # FIX: np.bool was removed in NumPy 1.24 -- use the builtin bool.
    blur_labels.dtype = bool
    labels.dtype = bool
    np.savez(save_name, blur_imgs=blur_imgs, blur_labels=blur_labels)
    return [blur_imgs, blur_labels]
def quick_roll():
    """Replay a recorded rollout from the `dic` log against the environment.

    Iterates the logged instance ids (dic['iid']), looks up each instance's
    app and resource row, sanity-checks the CPU data, and replays the logged
    step through run_game, stopping at a -1 step sentinel or when run_game
    reports the episode is over. Relies heavily on module globals
    (env, m, dic, args, run_game).
    """
    print('quick_roll')
    bar = Progbar(target=len(env.i_a), width=30, interval=0.05)
    log_rewards = []
    log_saved = []
    dic_init(args.fn)
    loss_old = 0
    for epoch in range(1):
        m.logprob_history = []
        m.rewards = []
        env.reset()
        print(len(dic['step']))
        # load_checkpoints(fn)
        # for id_,(iid,aid) in enumerate(env.i_a.items()):
        # for id_,(iid,mid) in enumerate(dic['im'].items()):
        for id_, iid in enumerate(dic['iid']):
            # forwarding
            # cur=env.app[app.aid==aid]
            step = dic['step'][id_]
            # print(iid,mid)
            # if (aid+iid)=='':
            if step == -1:
                # Sentinel: end of the recorded rollout.
                break
            else:
                # Trace a couple of fixed positions for debugging.
                if id_ == 0:
                    print(iid)
                if id_ == 300:
                    print(iid)
                aid = env.i_a[iid]
                cur = env.a_idx[aid]
                a = env.df_a_i.iloc[cur].cpu.split('|')
                # Sanity check: logged max CPU must match the env's unit table.
                assert pd.Series(a, dtype=float).max() == env.unit['c'][cur]
                not_quick_roll = 0
                # mid=dic['im'][iid]
                end = run_game(step, cur, not_quick_roll)
                # show_mid=[['mid',int(mid.data.numpy())]]
                # show_mid+=[['iid',iid]]
                # show_mid+=[['aid',aid]]
                bar.update(id_)
                if end:
                    break
def tag_dataset(dataset):
    """Predict label indices for every (tokens, labels) pair in `dataset`."""
    gold_labels = []
    predicted_labels = []
    progress = Progbar(len(dataset))
    for index, sample in enumerate(dataset):
        tokens, labels = sample
        token_batch = np.asarray([tokens])
        scores = model.predict([token_batch], verbose=False)[0]
        # Highest-scoring class per token.
        best = scores.argmax(axis=-1)
        gold_labels.append(labels)
        predicted_labels.append(best)
        progress.update(index)
    return predicted_labels, gold_labels
def train(self):
    """Run the full training loop with logging, early stopping and callbacks.

    Per epoch: trains self.batches batches, validates, averages each named
    loss, logs to CSV/SaveLoss, renders example images, saves the models,
    and stops early when the stop criterion (EarlyStopping on
    'val_loss_mod2_fused') fires.
    """
    log.info('Training Model')
    self.init_train_data()
    self.init_image_callback()
    sl = SaveLoss(self.conf.folder)
    cl = CSVLogger(self.conf.folder + '/training.csv')
    cl.on_train_begin()
    es = EarlyStopping('val_loss_mod2_fused', min_delta=0.01, patience=60)
    es.model = self.model.Segmentor
    es.on_train_begin()
    loss_names = self.get_loss_names()
    # Running history of the mean of every loss, keyed by loss name.
    total_loss = {n: [] for n in loss_names}
    progress_bar = Progbar(target=self.batches * self.conf.batch_size)
    for self.epoch in range(self.conf.epochs):
        log.info('Epoch %d/%d' % (self.epoch, self.conf.epochs))
        epoch_loss = {n: [] for n in loss_names}
        epoch_loss_list = []
        for self.batch in range(self.batches):
            # train_batch appends this batch's losses into epoch_loss.
            self.train_batch(epoch_loss)
            progress_bar.update((self.batch + 1) * self.conf.batch_size)
        self.validate(epoch_loss)
        for n in loss_names:
            epoch_loss_list.append((n, np.mean(epoch_loss[n])))
            total_loss[n].append(np.mean(epoch_loss[n]))
        log.info(str('Epoch %d/%d: ' + ', '.join([l + ' Loss = %.3f' for l in loss_names])) %
                 ((self.epoch, self.conf.epochs) + tuple(total_loss[l][-1] for l in loss_names)))
        logs = {l: total_loss[l][-1] for l in loss_names}
        # The CSV logger needs a model handle; reset stop_training so a
        # previous early-stop flag does not short-circuit logging.
        cl.model = self.model.D_Mask
        cl.model.stop_training = False
        cl.on_epoch_end(self.epoch, logs)
        sl.on_epoch_end(self.epoch, logs)
        # Plot some example images
        self.img_callback.on_epoch_end(self.epoch)
        self.model.save_models()
        if self.stop_criterion(es, logs):
            log.info('Finished training from early stopping criterion')
            break
def crop_images(imgs, labels, crop_rate=0.7, IMG_WIDTH=256, IMG_HEIGHT=256, IMG_CHANNELS=3):
    """Randomly crop each image/label pair and resize back to the target size.

    A square crop whose side is uniformly sampled in
    [crop_rate*size, size] is taken at a random position, then resized to
    (IMG_HEIGHT, IMG_WIDTH). Results are cached in an .npz keyed by crop_rate.

    Returns:
        [crp_imgs, crp_labels]: uint8 images and bool labels.
    """
    num_imgs = imgs.shape[0]
    crp_imgs = []
    crp_labels = []
    print('\nCropping train data with rate {:.2f} ...'.format(crop_rate))
    sys.stdout.flush()
    save_name = 'crop_{:d}'.format(int(crop_rate * 100)) + '.npz'
    if os.path.isfile(save_name):
        print('{:.2f} rate cropped data loaded from memory'.format(crop_rate))
        crp = np.load(save_name)
        crp_imgs = crp['crp_imgs']
        crp_labels = crp['crp_labels']
        return [crp_imgs, crp_labels]
    pbar = Progbar(num_imgs)
    for idx in range(num_imgs):
        img = imgs[idx]
        label = labels[idx]
        size = img.shape[0]
        # Sampled crop side length and a random top-left corner that keeps
        # the crop inside the image.
        csize = random.randint(np.floor(crop_rate * size), size)
        w_c = random.randint(0, size - csize)
        h_c = random.randint(0, size - csize)
        # FIX: crop by the sampled side `csize`. The original sliced
        # `w_c:w_c + size`, which NumPy silently clips at the array edge, so
        # the crop size was not the sampled one at all.
        img = img[w_c:w_c + csize, h_c:h_c + csize, :]
        label = label[w_c:w_c + csize, h_c:h_c + csize, :]
        img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant',
                     preserve_range=False, anti_aliasing=antialias_flag)
        label = resize(label, (IMG_HEIGHT, IMG_WIDTH), mode='constant',
                       preserve_range=False, anti_aliasing=antialias_flag)
        img = img_as_ubyte(img)
        label = img_as_ubyte(label)
        # FIX: np.bool was removed in NumPy 1.24 -- reinterpret as builtin bool.
        label.dtype = bool
        crp_imgs.append(img)
        crp_labels.append(label)
        pbar.update(idx)
    crp_imgs = np.array(crp_imgs)
    crp_labels = np.array(crp_labels)
    np.savez(save_name, crp_imgs=crp_imgs, crp_labels=crp_labels)
    return [crp_imgs, crp_labels]
def make_validation_images(self):
    """Materialize the validation split as (X, Y) arrays with augmentation off."""
    inputs = []
    targets = []
    print('making validation images')
    # Augmentation must be disabled so validation data is deterministic.
    self.augmentation_enabled = False
    progress = Progbar(self.validation_split_index)
    for index in np.arange(0, self.validation_split_index):
        batch_x, batch_y = self.get_batch(index, 1)
        inputs.append(batch_x[0])
        targets.append(batch_y[0])
        progress.update(index)
    X = np.array(inputs)
    Y = np.array(targets)
    self.validation_images = (X, Y)
    self.augmentation_enabled = True
    return X, Y
class BatchGenerator(object):
    """
    Class for queueing up batches so models can be trained in parallel.

    Worker threads claim non-overlapping offsets under a lock and push
    finished batches onto a shared queue; the trainer consumes them with
    get_next_batch_from_q().
    """

    def __init__(self, q_length, num_procs, batch_maker, batch_size):
        super(BatchGenerator, self).__init__()
        self.q = Queue()
        self.lock = Lock()  # guards self.offset / epoch bookkeeping
        self.offset = 0  # total items handed out to workers so far
        self.num_procs = num_procs
        self.q_length = q_length  # max batches kept ready in the queue
        self.batch_maker = batch_maker
        self.batch_size = batch_size
        self.epoch = 0
        self.epoch_progress = 0
        self.bar = Progbar(self.batch_maker.epoch_length)

    def start_queue_runners(self):
        """Spawn the daemon worker threads that keep the queue filled."""
        for i in np.arange(0, self.num_procs):
            t = Thread(target=self.__batch_loop,
                       args=(self.q, self.q_length, i))
            # FIX: Thread.setDaemon() is deprecated and removed in
            # Python 3.12 -- assign the daemon attribute instead.
            t.daemon = True
            t.start()

    def get_next_batch_from_q(self):
        """Block until a batch is available and return it as (X, Y)."""
        X, Y = self.q.get()
        self.bar.update(self.offset % self.batch_maker.epoch_length)
        return (X, Y)

    def current_epoch(self):
        """Number of full passes over the data handed out so far."""
        return np.floor(self.offset / self.batch_maker.epoch_length)

    def num_items_in_q(self):
        """Approximate count of ready batches in the queue."""
        return self.q.qsize()

    def __batch_loop(self, q, max_q, worker_num):
        # Runs forever on a daemon thread; dies with the process.
        while True:
            s = q.qsize()
            if s < max_q:
                # Claim a unique offset range under the lock, then build the
                # batch outside it so workers run in parallel.
                with self.lock:
                    batch_num = self.offset
                    self.offset += self.batch_size
                    self.epoch = int(
                        floor(self.offset / self.batch_maker.epoch_length))
                    self.epoch_progress = self.offset / self.batch_maker.epoch_length
                self.batch_maker.make_batch(q, batch_num, self.batch_size,
                                            worker_num)
def tag_dataset(model, dataset):
    """Predict per-token classes for every sample; also return gold labels and tokens."""
    gold = []
    predicted = []
    token_batches = []
    progress = Progbar(len(dataset))
    for index, sample in enumerate(dataset):
        tokens, casing, char, labels = sample
        # Add a batch dimension of 1 to every input.
        token_arr = np.asarray([tokens])
        casing_arr = np.asarray([casing])
        char_arr = np.asarray([char])
        scores = model.predict([token_arr, casing_arr, char_arr], verbose=False)[0]
        best = scores.argmax(axis=-1)
        gold.append(labels)
        token_batches.append(token_arr)
        predicted.append(best)
        progress.update(index)
    return predicted, gold, token_batches
def __eltransform(self):
    """Apply elastic deformation to training images and labels, cached in 'elstc.npz'.

    The bool label array is temporarily reinterpreted in place as uint8
    (same itemsize) so the transform can operate on it, then flipped back.

    Returns:
        (elt_imgs, elt_labels): deformed images and bool labels.
    """
    imgs = self.X_in
    labels = self.Y_in
    num_imgs = imgs.shape[0]
    elt_imgs = []
    elt_labels = []
    print('\nPerforming elastic transform on train data ... ')
    sys.stdout.flush()
    if os.path.isfile('elstc.npz'):
        print('Elastic trsformed data loaded from memory')
        elstc = np.load('elstc.npz')
        elt_imgs = elstc['elt_imgs']
        elt_labels = elstc['elt_labels']
        return elt_imgs, elt_labels
    # FIX: the dtype reinterpretation now happens after the cache check;
    # previously the cached-return path left self.Y_in viewed as uint8.
    labels.dtype = np.uint8
    pbar = Progbar(imgs.shape[0])
    for idx in range(num_imgs):
        img = imgs[idx]
        label = labels[idx]
        # Deformation strength/smoothness scale with image width.
        alpha = img.shape[1] * 1
        sigma = img.shape[1] * 0.05
        img_elt = self.__elastic_transform(img, alpha, sigma)
        label_elt = self.__elastic_transform(label, alpha, sigma)
        elt_imgs.append(img_elt)
        elt_labels.append(label_elt)
        pbar.update(idx)
    elt_imgs = np.array(elt_imgs)
    elt_labels = np.array(elt_labels)
    # FIX: np.bool was removed in NumPy 1.24 -- use the builtin bool.
    elt_labels.dtype = bool
    labels.dtype = bool
    np.savez("elstc", elt_imgs=elt_imgs, elt_labels=elt_labels)
    return elt_imgs, elt_labels
def tag_dataset(dataset):
    """Predict labels with a two-headed (forward + reversed backward) model.

    For each sample, builds model inputs via `transform`, sums the forward
    head with the flipped backward head, argmaxes per token, and collects
    gold labels the same way. Prints a sklearn classification report over
    all tokens before returning.

    Returns:
        (predLabels, correctLabels): lists of per-sentence label-index arrays.
    """
    correctLabels = []
    predLabels = []
    b = Progbar(len(dataset))
    for i, data in enumerate(dataset):
        tokens, casing, char, labels, pos_tag = data
        # FIX: renamed the local `input` -> `model_input`; it shadowed the
        # builtin input().
        model_input, expected = transform([[tokens, casing, char, labels, pos_tag]],
                                          max(2, len(labels)), pos_tag_index)
        pred = model.predict(model_input, verbose=False)
        # Merge the forward head with the reversed backward head.
        pred = np.add(np.squeeze(pred[0]), np.flip(np.squeeze(pred[1]), axis=0))
        pred = pred.argmax(axis=-1)  # Predict the classes
        expected = np.squeeze(expected[0])
        expected = np.argmax(expected, axis=1)
        correctLabels.append(expected)
        predLabels.append(pred)
        b.update(i)
    print(metrics.classification_report(
        list(chain.from_iterable(correctLabels)),
        list(chain.from_iterable(predLabels))))
    return predLabels, correctLabels