def read_inputs_(sample_dir, train_rate, batch_size, is_training=True, num_threads=20):
    train_data, train_label, valid_data, valid_label, train_n, valid_n, note_label = load_image(
        sample_dir, train_rate).gen_train_valid()
    # Create queues that produce the filenames to read.
    train_filename_queue = tf.train.slice_input_producer(
        [train_data, train_label], shuffle=True, capacity=1024)
    valid_filename_queue = tf.train.slice_input_producer(
        [valid_data, valid_label], shuffle=False, capacity=1024, num_epochs=1)
    # Read examples from files in the filename queue.
    train_file_content = tf.read_file(train_filename_queue[0])
    valid_file_content = tf.read_file(valid_filename_queue[0])
    # Decode the image; decode_jpeg handles JPEG only (decode_image would be
    # needed for PNG/GIF).
    train_reshaped_image = tf.to_float(
        tf.image.decode_jpeg(train_file_content, channels=3))
    valid_reshaped_image = tf.to_float(
        tf.image.decode_jpeg(valid_file_content, channels=3))
    # Resize images to 299*299 (the Inception input size).
    train_reshaped_image = tf.image.resize_images(train_reshaped_image, [299, 299])
    valid_reshaped_image = tf.image.resize_images(valid_reshaped_image, [299, 299])

    train_label = tf.cast(train_filename_queue[1], tf.int64)
    valid_label = tf.cast(valid_filename_queue[1], tf.int64)

    train_reshaped_image = _train_preprocess(train_reshaped_image)
    valid_reshaped_image = _test_preprocess(valid_reshaped_image)

    # Ensure that the random shuffling has good mixing properties.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(5000 * min_fraction_of_examples_in_queue)
    print('Filling queue with %d images before starting to train. '
          'This may take some time.' % min_queue_examples)

    train_images, train_label_batch = tf.train.batch(
        [train_reshaped_image, train_label],
        batch_size=batch_size,
        allow_smaller_final_batch=not is_training,
        num_threads=num_threads,
        capacity=min_queue_examples + 3 * batch_size)
    valid_images, valid_label_batch = tf.train.batch(
        [valid_reshaped_image, valid_label],
        batch_size=batch_size,
        allow_smaller_final_batch=not is_training,
        num_threads=num_threads,
        capacity=min_queue_examples + 3 * batch_size)
    return train_images, train_label_batch, valid_images, valid_label_batch
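# A minimal usage sketch for the queue-based read_inputs_ above (an assumption,
# not code from the original repo): tf.train.batch only yields data once queue
# runners have been started inside a session, and slice_input_producer with
# num_epochs=1 registers a local epoch-counter variable that must be
# initialized first. The sample_dir and parameters below are hypothetical.
def _demo_queue_pipeline():
    images, labels, v_images, v_labels = read_inputs_(
        'samples/', train_rate=0.9, batch_size=32)
    with tf.Session() as sess:
        sess.run([tf.global_variables_initializer(),
                  tf.local_variables_initializer()])
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            img_batch, lbl_batch = sess.run([images, labels])
            print(img_batch.shape, lbl_batch.shape)
        finally:
            coord.request_stop()
            coord.join(threads)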
learning_rate_base = config.learning_rate_base
decay_rate = config.decay_rate
height, width = config.height, config.width
# Directory where the trained model is saved.
train_dir = config.train_dir
# Whether to fine-tune, and which parameters to fine-tune.
fine_tune = config.fine_tune
# Whether to train the parameters of all layers.
train_all_layers = config.train_all_layers
# Pretrained network checkpoint for transfer learning.
checkpoint_path = config.checkpoint_path
from lib.train.train_GANs import train_GANs as train

# train_data, train_label, valid_data, valid_label, train_n, valid_n, note_label = data_load_from_txt_mullabel(sample_dir, train_rate).gen_train_valid()
train_data, train_label, valid_data, valid_label, train_n, valid_n, note_label = load_image(
    sample_dir, train_rate).gen_train_valid()
print('note_label', note_label)
print(train_data)
print(train_label)
# Convert integer class ids to one-hot vectors.
train_label = to_one_hot(train_label, num_classes)
valid_label = to_one_hot(valid_label, num_classes)
print(train_label)
if not os.path.isdir(train_dir):
    os.makedirs(train_dir)
train(train_data, train_label, valid_data, valid_label, train_dir, num_classes,
      batch_size, arch_model, learning_r_decay, learning_rate_base, decay_rate,
      dropout_prob, epoch, height, width, checkpoint_exclude_scopes, early_stop,
      EARLY_STOP_PATIENCE, fine_tune, train_all_layers, checkpoint_path)
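# to_one_hot is called above but defined elsewhere in the repo. A minimal NumPy
# sketch of what it is assumed to do (map integer class ids to one-hot rows);
# _to_one_hot_sketch is a hypothetical stand-in, not the repo's implementation.
import numpy as np

def _to_one_hot_sketch(labels, num_classes):
    # labels: list/array of integer class ids in [0, num_classes).
    labels = np.asarray(labels, dtype=np.int64)
    one_hot = np.zeros((labels.shape[0], num_classes), dtype=np.float32)
    one_hot[np.arange(labels.shape[0]), labels] = 1.0
    return one_hot

# e.g. _to_one_hot_sketch([0, 2], 3) -> [[1., 0., 0.], [0., 0., 1.]]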
def read_inputs(sample_dir, train_rate, batch_size, is_training=True, num_threads=20):
    train_data, train_label, valid_data, valid_label, train_n, valid_n, note_label = load_image(
        sample_dir, train_rate).gen_train_valid()
    num_prefetch = 5 * batch_size
    train_num_sample = len(train_data)
    valid_num_sample = len(valid_data)

    train_images = tf.convert_to_tensor(train_data, dtype=tf.string)
    train_labels = tf.convert_to_tensor(train_label, dtype=tf.int64)
    train_data = tf.data.Dataset.from_tensor_slices(
        (train_images, train_labels))
    train_data = train_data.shuffle(buffer_size=train_num_sample)
    train_data = train_data.map(
        _parse_data, num_parallel_calls=num_threads).prefetch(num_prefetch)
    train_data = _data_aug(train_data, num_threads, num_prefetch)
    train_data = train_data.batch(batch_size)
    epoch = None  # Repeat the dataset indefinitely.
    train_data = train_data.repeat(epoch)
    # train_iterator = tf.data.Iterator.from_structure(train_data.output_types, train_data.output_shapes)
    train_iterator = train_data.make_one_shot_iterator()
    train_next_element = train_iterator.get_next()
    train_images, train_label_batch = train_next_element
    # ----------------------------------------------------------------------------------- #
    valid_images = tf.convert_to_tensor(valid_data, dtype=tf.string)
    valid_labels = tf.convert_to_tensor(valid_label, dtype=tf.int64)
    valid_data = tf.data.Dataset.from_tensor_slices(
        (valid_images, valid_labels))
    valid_data = valid_data.shuffle(buffer_size=valid_num_sample)
    valid_data = valid_data.map(
        _parse_data, num_parallel_calls=num_threads).prefetch(num_prefetch)
    # No augmentation at validation time, but normalization is still needed.
    valid_data = _data_aug(valid_data, num_threads, num_prefetch, augment=[])
    valid_data = valid_data.batch(batch_size)
    epoch = None  # Repeat the dataset indefinitely.
    valid_data = valid_data.repeat(epoch)
    # valid_iterator = tf.data.Iterator.from_structure(valid_data.output_types, valid_data.output_shapes)
    valid_iterator = valid_data.make_one_shot_iterator()
    valid_next_element = valid_iterator.get_next()
    valid_images, valid_label_batch = valid_next_element
    # with tf.Session() as sess:
    #     image, mask = sess.run([train_images, train_label_batch])
    #     print(mask)
    return train_images, train_label_batch, valid_images, valid_label_batch, train_n, valid_n
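# _parse_data and _data_aug are referenced in read_inputs above but defined
# elsewhere in the repo. A minimal sketch of what _parse_data is assumed to do
# in this TF 1.x tf.data pipeline: read the file at the given path, decode it
# as a 3-channel JPEG, resize, and pass the label through unchanged.
# _parse_data_sketch is a hypothetical stand-in, not the repo's implementation.
def _parse_data_sketch(filename, label):
    file_content = tf.read_file(filename)
    image = tf.image.decode_jpeg(file_content, channels=3)
    # Fixed resize: Dataset.map applies this per element before batch(), so
    # every returned image must have the same static shape.
    image = tf.image.resize_images(tf.to_float(image), [299, 299])
    return image, label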