import cv2
import numpy as np

# normalize_image, adjust_gamma, softmax, and IMAGE_SIZE are project helpers
# defined elsewhere in the repository.


def predict(img, model, in_channels):
    # preprocess: convert to RGB, then to normalized grayscale if the model
    # expects a single channel, otherwise gamma-correct the color image
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if in_channels == 1:
        img = normalize_image(img)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    else:
        img = adjust_gamma(img)
    img = cv2.resize(img, IMAGE_SIZE)
    img = img / 255
    im = img.astype(np.float32).reshape(
        1, IMAGE_SIZE[0], IMAGE_SIZE[1], in_channels)
    y = model.predict([im])
    [pred] = y
    pred = softmax(pred)
    print(('[{:.5f} {:.5f}] ' * (len(pred) // 2)).format(*pred))
    return pred, img
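# Usage sketch (not from the original source): loads a saved Keras model and
# classifies one image; 'model.h5' and 'sample.jpg' are hypothetical paths.
def _example_predict():
    from keras.models import load_model
    model = load_model('model.h5')
    img = cv2.imread('sample.jpg')  # OpenCV reads BGR, which predict() converts
    pred, processed = predict(img, model, in_channels=3)
    print('predicted class index:', int(np.argmax(pred)))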
import os

from chainer.datasets import tuple_dataset


def load_data(dirname, in_channels):
    # one subdirectory per class label
    dirs = ['001_00', '001_01', '005_00', '005_01', '010_00', '010_01',
            '050_00', '050_01', '100_00', '100_01', '500_00', '500_01']

    # first pass: count the files so the arrays can be preallocated
    count = 0
    for i, dir in enumerate(dirs):
        for r, ds, fs in os.walk(os.path.join(dirname, dir)):
            count += len(fs)
    xs = np.zeros((count, in_channels, IMAGE_SIZE[0], IMAGE_SIZE[1])).astype(
        np.float32)
    ys = np.zeros(count).astype(np.int32)

    # second pass: load, preprocess, and store each image in NCHW layout
    idx = 0
    for i, dir in enumerate(dirs):
        for r, ds, fs in os.walk(os.path.join(dirname, dir)):
            for f in fs:
                filename = os.path.join(r, f)
                img = cv2.imread(filename)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                if in_channels == 1:
                    img = normalize_image(img)
                    img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
                else:
                    img = adjust_gamma(img)
                img = cv2.resize(img, IMAGE_SIZE)
                img = img / 255
                im = img.astype(np.float32).reshape(
                    IMAGE_SIZE[0], IMAGE_SIZE[1], in_channels).transpose(2, 0, 1)
                xs[idx] = im
                ys[idx] = i
                idx += 1
    return tuple_dataset.TupleDataset(xs, ys)
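# Usage sketch (not from the original source): wraps the TupleDataset in a
# Chainer SerialIterator; 'data/train' is a hypothetical dataset directory.
def _example_iterate():
    from chainer import iterators
    train = load_data('data/train', in_channels=3)
    it = iterators.SerialIterator(train, batch_size=32, shuffle=True)
    batch = it.next()  # list of (image, label) tuples
    print('loaded %d samples; first label: %d' % (len(train), batch[0][1]))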
import tensorflow as tf

# `image` (image helpers) and `vw` (view utilities) are project-local modules.


def load_data(config, views, shape_list, shuffle=True, batch_size=-1):
    """
    input:
        config        tf.app.flags        command line arguments
        views         vw.View             view point information
        shape_list    list of string      input shape name list
        shuffle       bool                whether the input shape list should be shuffled
    output:
        name_batch    n x string          shape names
        source_batch  n x H x W x Ci      source images
        target_batch  (n*m) x H x W x Co  target images in m views
        mask_batch    (n*m) x H x W x 1   target boolean masks in m views
        angle_batch   (n*m) x 4           target viewing angle params in m views
        num_shapes    int                 number of loaded shapes
    """
    if batch_size == -1:
        batch_size = config['batch_size']

    # handle affixes for the source sketch images
    num_source_views = len(config['sketch_views'])
    source_prefix_list = ['sketch/' for view in range(num_source_views)]
    source_interfix_list = ['/sketch-%c' % v for v in config['sketch_views']]
    if config['test']:
        sketch_variation = '0'
    else:
        sketch_variation_queue = tf.train.string_input_producer(
            ['%d' % v for v in range(config['sketch_variations'])], shuffle=True)
        sketch_variation = sketch_variation_queue.dequeue()
    source_suffix_list = ['-' + sketch_variation + '.png' for view in range(num_source_views)]

    # affixes for the 'dnfs' target maps
    num_dnfs_views = max(2, len(config['sketch_views']))
    dnfs_prefix_list = ['dnfs/' for view in range(num_dnfs_views)]
    dnfs_interfix_list = ['/dnfs-%d' % config['image_size'] for view in range(num_dnfs_views)]
    dnfs_suffix_list = ['-%d.png' % view for view in range(num_dnfs_views)]

    # affixes for the 'dn' target maps
    num_dn_views = 12
    dn_prefix_list = ['dn/' for view in range(num_dn_views)]
    dn_interfix_list = ['/dn-%d' % config['image_size'] for view in range(num_dn_views)]
    dn_suffix_list = ['-%d.png' % view for view in range(num_dn_views)]

    num_target_views = num_dnfs_views + num_dn_views
    target_prefix_list = dnfs_prefix_list + dn_prefix_list
    target_interfix_list = dnfs_interfix_list + dn_interfix_list
    target_suffix_list = dnfs_suffix_list + dn_suffix_list
    num_target_views = views.num_views  # NOTE: overrides the count computed above

    # build input queue
    if config['continuous_view'] and config['test']:
        shape_list_queue = tf.train.input_producer(
            [name for name in shape_list for view in range(num_target_views)], shuffle=False)
    else:
        shape_list_queue = tf.train.input_producer(shape_list, shuffle=shuffle)

    # load data from queue
    shape_name = shape_list_queue.dequeue()
    source_files = [config['data_dir'] + source_prefix_list[view] + shape_name
                    + source_interfix_list[view] + source_suffix_list[view]
                    for view in range(num_source_views)]
    if not config['continuous_view']:
        target_files = [config['data_dir'] + target_prefix_list[view] + shape_name
                        + target_interfix_list[view] + target_suffix_list[view]
                        for view in range(num_target_views)]
        target_angles = tf.zeros([num_target_views, 4])
    else:
        angle_list = [vw.view2angle(view) for view in views.views]
        view_list_queue = tf.train.slice_input_producer(
            [angle_list, target_prefix_list, target_interfix_list, target_suffix_list],
            shuffle=(not config['test']))
        target_files = [config['data_dir'] + view_list_queue[1] + shape_name
                        + view_list_queue[2] + view_list_queue[3]]  # only one single image
        target_angles = [view_list_queue[0]]

    # decode source images
    source_images = [tf.image.decode_png(tf.read_file(file), channels=1, dtype=tf.uint8)
                     for file in source_files]
    source_image = tf.concat(source_images, 2)  # put multi-view images into different channels
    source_image = image.normalize_image(tf.slice(
        source_image, [0, 0, 0],
        [config['image_size'], config['image_size'], -1]))  # just do a useless slicing to establish size
    source_image = tf.concat(
        [source_image, tf.image.flip_left_right(source_image)], 2)  # HACK: add horizontally flipped image as input
    # decode target images
    if not config['test']:
        target_images = tf.stack([
            tf.image.decode_png(tf.read_file(file), channels=4, dtype=tf.uint16)
            for file in target_files])
        target_images = image.normalize_image(tf.slice(
            target_images, [0, 0, 0, 0],
            [-1, config['image_size'], config['image_size'], -1]))
    else:
        target_images = tf.ones(
            [len(target_files), config['image_size'], config['image_size'], 4])  # dummy target for testing
    target_masks = image.extract_boolean_mask(target_images)
    if config['predict_normal']:
        # pre-process normal background
        target_shape = target_images.get_shape().as_list()
        target_background = tf.concat(
            [tf.zeros(target_shape[:-1] + [2]), tf.ones(target_shape[:-1] + [2])], 3)  # (0,0,1,1)
        target_images = tf.where(
            tf.tile(target_masks, [1, 1, 1, target_shape[3]]),
            target_images, target_background)
    else:
        # retain depth only
        target_images = tf.slice(target_images, [0, 0, 0, 3], [-1, -1, -1, 1])
        target_images = tf.concat(
            [target_images, image.convert_to_real_mask(target_masks)], 3)

    # create prefetching tensor
    num_shapes = len(shape_list)
    min_queue_examples = max(1, int(num_shapes * 0.01))
    tensor_data = [shape_name, source_image, target_images, target_masks, target_angles]
    if shuffle:
        num_preprocess_threads = 12
        batch_data = tf.train.shuffle_batch(
            tensor_data,
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples + 3 * batch_size,
            min_after_dequeue=min_queue_examples)
    else:
        num_preprocess_threads = 1
        batch_data = tf.train.batch(
            tensor_data,
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples)

    name_batch = batch_data[0]
    source_batch = batch_data[1]
    target_batch = batch_data[2]
    target_batch = tf.reshape(target_batch, [-1] + target_batch.get_shape().as_list()[2:])
    mask_batch = batch_data[3]
    mask_batch = tf.reshape(mask_batch, [-1] + mask_batch.get_shape().as_list()[2:])
    angle_batch = batch_data[4]
    angle_batch = tf.reshape(angle_batch, [-1] + angle_batch.get_shape().as_list()[2:])
    #print('name: ', name_batch)
    #print('source: ', source_batch)
    #print('target: ', target_batch)
    #print('mask: ', mask_batch)
    #print('angle: ', angle_batch)

    return name_batch, source_batch, target_batch, mask_batch, angle_batch, num_shapes
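# Usage sketch (not from the original source): the queue-based pipeline above
# only produces data once queue runners are started inside a TF 1.x session.
# The config, views, and shape_list arguments are assumed to be set up as in
# the rest of the project.
def _example_run_pipeline(config, views, shape_list):
    name_batch, source_batch, target_batch, mask_batch, angle_batch, num_shapes = \
        load_data(config, views, shape_list, shuffle=True)
    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        names, sources = sess.run([name_batch, source_batch])  # fetch one batch
        print(names, sources.shape)
        coord.request_stop()
        coord.join(threads)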
            # (tail of the per-batch loop inside training())
            d_loss += (d_fake_loss + d_valid_loss) / 2

            # Train Generator (freeze the discriminator inside the combined model)
            noise = generate_noise(batch_size)
            discriminator.trainable = False
            g_loss += gan.train_on_batch(noise, y_valid)

        # average the accumulated losses over this epoch
        discriminator_loss.append(d_loss / batches)
        generator_loss.append(g_loss / batches)

        if epoch % PLOT_FRECUENCY == 0:
            plot_images(epoch, generator)
            plot_loss(epoch, generator_loss, discriminator_loss)
            plot_test(epoch, x_test, generator)
            generator.save('Save_model/generator.h5')
            discriminator.save('Save_model/discriminator.h5')
            gan.save('Save_model/gan.h5')
            #save_images(generator)


if __name__ == '__main__':
    # Load data
    x_train = np.load('../Save_Data/train_data_64.npy')
    x_train = x_train * 2 - 1  # rescale from [0, 1] to [-1, 1] (typical for a tanh generator)
    x_test = np.load('../Save_Data/yaya_64.npy')
    x_test = normalize_image(x_test)

    training(x_train, x_test, epochs=200)
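# Sketch of the discriminator update that precedes the fragment above (not
# part of the original source): one common Keras pattern that would produce
# d_fake_loss and d_valid_loss. The label arrays and generate_noise() are
# assumed to match the original training loop.
def _example_discriminator_step(discriminator, generator, real_images, batch_size):
    noise = generate_noise(batch_size)
    fake_images = generator.predict(noise)
    y_valid = np.ones((batch_size, 1))   # labels for real samples
    y_fake = np.zeros((batch_size, 1))   # labels for generated samples
    discriminator.trainable = True
    d_valid_loss = discriminator.train_on_batch(real_images, y_valid)
    d_fake_loss = discriminator.train_on_batch(fake_images, y_fake)
    return (d_fake_loss + d_valid_loss) / 2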
def load_data(config, pattern_list, shuffle=True, batch_size=-1):
    """
    input:
        config         tf.app.flags    command line arguments
        pattern_list   list of string  input pattern name list
        shuffle        bool            whether the input list should be shuffled
    output:
        name_batch     n x string      pattern names
        image_batch    n x H x W x 1   pattern images
        triplet_batch  n x T x 3 x 2   triplets of patch coordinates (# triplets x {A,B,C} x {h,w})
        num_patterns   int             number of loaded patterns
    """
    print('Loading data...')
    if batch_size == -1:
        batch_size = config.batch_size

    # build input queue
    pattern_list_queue = tf.train.input_producer(pattern_list, shuffle=shuffle)
    pattern_name = pattern_list_queue.dequeue()

    # decode pattern image
    #image_file = config.data_dir + 'image/' + pattern_name + '.png'
    image_file = config.data_dir + 'region/' + pattern_name + '.png'
    image_tensor = tf.image.decode_png(tf.read_file(image_file), channels=1, dtype=tf.uint8)
    image_tensor = image.normalize_image(tf.slice(
        image_tensor, [0, 0, 0],
        [config.image_size, config.image_size, -1]))  # just do a useless slicing to establish size

    # decode pattern triplets
    if not config.real_data:
        triplet_file = config.data_dir + 'triplet-region/' + pattern_name + '.bin'
        triplet_data = tf.read_file(triplet_file)
        triplet_tensor = tf.reshape(tf.decode_raw(triplet_data, tf.int16), [-1, 3, 2])
        triplet_tensor = tf.cast(triplet_tensor, dtype=tf.int32)
        triplet_tensor = tf.slice(tf.random_shuffle(triplet_tensor),
                                  [0, 0, 0], [config.num_triplets, -1, -1])
    else:
        triplet_tensor = tf.zeros([1, 3, 2], dtype=tf.int32)

    # create prefetching tensor
    num_patterns = len(pattern_list)
    min_queue_examples = max(1, int(num_patterns * 0.01))
    tensor_data = [pattern_name, image_tensor, triplet_tensor]
    if shuffle:
        num_preprocess_threads = 12
        batch_data = tf.train.shuffle_batch(
            tensor_data,
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples + 3 * batch_size,
            min_after_dequeue=min_queue_examples)
    else:
        num_preprocess_threads = 1
        batch_data = tf.train.batch(
            tensor_data,
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples)

    name_batch = batch_data[0]
    image_batch = batch_data[1]
    triplet_batch = batch_data[2]
    print('name: ', name_batch)
    print('image: ', image_batch)
    print('triplet: ', triplet_batch)
    return name_batch, image_batch, triplet_batch, num_patterns
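# Usage sketch (not from the original source): reads one triplet .bin file
# offline with NumPy, mirroring the tf.decode_raw call above; the path is a
# hypothetical example.
def _example_read_triplets(path='data/triplet-region/pattern0.bin'):
    raw = np.fromfile(path, dtype=np.int16)
    triplets = raw.reshape(-1, 3, 2).astype(np.int32)  # (#triplets, {A,B,C}, {h,w})
    print('loaded %d triplets' % triplets.shape[0])
    return triplets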