def array_to_img(x, data_format=None, scale=True, dtype=None):
    """Convert a numpy array to a PIL image via Keras' ``image.array_to_img``.

    Degrades gracefully on older Keras releases whose ``array_to_img``
    signature does not accept a ``dtype`` keyword.
    """
    if data_format is None:
        data_format = backend.image_data_format()
    kwargs = {'data_format': data_format, 'scale': scale}
    # Only forward `dtype` when the installed Keras version supports it.
    if 'dtype' in generic_utils.getargspec(image.array_to_img).args:
        kwargs['dtype'] = backend.floatx() if dtype is None else dtype
    return image.array_to_img(x, **kwargs)
def data_augumentation(imgs, img_type):
    """Augment a batch of 128x128 patches by rotating and flipping.

    Parameters
    ----------
    imgs : ndarray
        Batch of patches, indexed along axis 0. For 'clabel' each entry is
        a categorical label decoded back to an image via `clabel_to_img`.
    img_type : str
        Either 'raw' or 'clabel'.

    Returns
    -------
    ndarray
        float32 array of shape (total, 1, 128, 128) scaled to [0, 1];
        for 'clabel' the result is re-encoded through `categorize_label`.

    Raises
    ------
    ValueError
        If `img_type` is not 'raw' or 'clabel'. (Previously an unknown
        type silently took the 'raw' save path and the function
        returned None.)
    """
    from image import array_to_img, img_to_array
    import cv2
    # Fail fast on bad input, consistent with visualize_patches().
    if img_type not in ('raw', 'clabel'):
        raise ValueError('Invalid img_type:', img_type)
    tmp = list()
    for x in xrange(imgs.shape[0]):
        img = imgs[x]
        if img_type == 'clabel':
            # Decode the categorical label into a displayable 128x128 image.
            img = clabel_to_img(img, 128, 128)
        tmp.append(array_to_img(img))
    # Augment: all rotations first, then flips of every rotated variant.
    augmentated_list = rotate_img(tmp)
    augmentated_list = flip_img(augmentated_list)
    total = len(augmentated_list)
    augmentated_imgs = np.zeros([total, 1, 128, 128], dtype='float32')
    for x in xrange(total):
        if img_type == 'clabel':
            data_path = os.path.join('test/retrain/label/img' + str(x) + '.png')
        else:
            data_path = os.path.join('test/retrain/raw/img' + str(x) + '.png')
        augmentated_list[x].save(data_path)
        # Normalize 8-bit pixel values back to [0, 1].
        augmentated_imgs[x] = img_to_array(augmentated_list[x]) / 255
    if img_type == 'raw':
        return augmentated_imgs
    # img_type == 'clabel': re-encode augmented masks as categorical labels.
    return categorize_label(augmentated_imgs)
def visualize_patches(data_path, output_path, img_type): from image import combine_img, array_to_img from predict import clabels_to_img if img_type not in {'raw', 'label'}: raise ValueError('Invalid img_type:', img_type) data = np.load(data_path) if img_type == 'label': imgs = clabels_to_img(data, 512, 512) else: imgs = data total = len(imgs) tmp_list = list() combined_imgs = np.zeros([total / 16, 1, imgs.shape[2], imgs.shape[3]]) for img_array in imgs: img_array *= 255 img = array_to_img(img_array, scale=False) tmp_list.append(img) for x in xrange(total / 16): combined_img = combine_img(tmp_list[16 * x:16 * (x + 1)]) data_path = os.path.join(output_path, 'img' + str(x) + '.png') combined_img.save(data_path) print 'save image:', x + 1, '/', total / 16
def visualize_ts(dir_path, imgs_test_name, imgs_true, imgs_pred):
    """Save per-image overlays comparing ground truth against predictions.

    Builds a 3-channel image per test sample: channel 0 (red) holds the
    ground-truth mask, channels 1 and 2 (green, blue) both hold the
    prediction, so agreement renders white, misses red, false alarms cyan.

    NOTE(review): assumes `imgs_true`/`imgs_pred` contain binary 0/1
    values -- the uint8 buffer truncates fractional values before the
    `*= 255` scaling. TODO confirm against the callers.
    """
    from image import array_to_img
    ts_path = 'ts/'
    dir_path = os.path.join(dir_path, ts_path)
    # Raises OSError if the 'ts/' subdirectory already exists.
    os.mkdir(dir_path)
    print 'Calculate threat score...'
    for x in xrange(imgs_test_name.shape[0]):
        ts_img = np.zeros([3, imgs_true.shape[2], imgs_true.shape[3]], dtype='uint8')
        ts_img[0] = imgs_true[x][0]
        ts_img[1] = imgs_pred[x][0]
        ts_img[2] = imgs_pred[x][0]
        # Scale binary mask values up to full 8-bit intensity.
        ts_img *= 255
        ts = array_to_img(ts_img,scale=False)
        ts.save(os.path.join(dir_path,imgs_test_name[x]))
def visualize(dir_path, imgs_test_name, imgs, visualize=True): from image import combine_img, array_to_img, img_to_array tmp_list = list() for img_array in imgs: img_array *= 255 img = array_to_img(img_array, scale=False) tmp_list.append(img) total = imgs_test_name.shape[0] combined_imgs = np.zeros([total, 1, imgs.shape[2]*4, imgs.shape[3]*4], dtype = 'uint8') for x in xrange(total): combined_img = combine_img(tmp_list[16*x:16*(x+1)]) if visualize == True: combined_img.save(os.path.join(dir_path, imgs_test_name[x])) print 'save label image:', x+1, '/', total combined_imgs[x] = img_to_array(combined_img) / 255 return combined_imgs
# --- Save example reconstructions, then run PCA on the encodings ---
exampleDir = ''.join([args.outdir, 'examples/'])
if not os.path.exists(exampleDir):
    os.makedirs(exampleDir)
# Fixed seed so the same 100 example images are picked on every run.
np.random.seed(0)
if args.subset:
    # NOTE(review): `subset` is read here but only assigned from
    # args.subset further below -- it must already be defined earlier in
    # the file, otherwise this raises NameError. Confirm ordering.
    imPaths = np.random.choice(subset, 100)
else:
    imPaths = np.random.choice(allPaths, 100)
ims = np.asarray([
    getOneStatic(imPath, args.sampSize, args.newSize) for imPath in imPaths
])
#ims = getBatch(101, allPaths, args.sampSize, args.newSize, args.threads)
for i in range(100):
    # 'A' = original input, 'B' = autoencoder reconstruction.
    image.array_to_img(ims[i] * 255.).save(''.join(
        [exampleDir, str(i), 'A.png']))
    image.array_to_img(cae.predict(ims[i:i + 1])[0] * 255.).save(
        ''.join([exampleDir, str(i), 'B.png']))

# Perform PCA on encodings
if args.subset:
    subset = list(pd.read_csv(args.subset, header=None)[0])
    # Keep only the encodings whose path appears in the subset file.
    subset_indices = [i for i, j in enumerate(allPaths) if j in subset]
    repAll = repAll[subset_indices]
pca = PCA(n_components=args.K, whiten=True)
reduced = pca.fit_transform(repAll)
vari = pca.explained_variance_ratio_
# nDims = np.where(np.cumsum(vari) > .98)[0][0]
# reduced = reduced[:,:nDims]
nDims = reduced.shape[1]
# ############################# allPaths = image.list_pictures(imagePath) repAll = np.zeros((len(allPaths), 1024)) for i in xrange(len(repAll)): print(i) for co in xrange(10): repAll[i, :] += sum(encode.predict(getOne(100, allPaths[i]))) repAll[i, :] /= 1000. # this shows the encoding and decoding for some random images ims = getBatch(101, imagePath) for i in xrange(100): image.array_to_img(ims[i] * 255.).save(outputName + '/' + str(i) + 'A.png') image.array_to_img(cae.predict(ims[i:i + 1])[0] * 255.).save(outputName + '/' + str(i) + 'B.png') K = 1024 pca = PCA(n_components=K, whiten=True) reduced = pca.fit_transform(repAll) vari = pca.explained_variance_ratio_ # nDims = np.where(np.cumsum(vari) > .98)[0][0] # reduced = reduced[:,:nDims] nDims = reduced.shape[1] with open(outputName + '_vari.txt', 'w') as fi: for x in np.cumsum(vari): fi.write(str(x) + '\n')