def main(_):
    args = parse_args()
    train_set, val_set = load_mat_data()

    # Group the labeled training samples by class (6 classes); skip unlabeled entries.
    table = [[] for i in range(6)]
    for f in train_set:
        if f['label_index'] != -1:
            table[f['label_index']].append(f)

    import random
    # Draw a balanced, randomly resampled subset for each of the ensemble nets.
    train_set_selected = [[] for i in range(args.number_of_nets)]
    train_images = [None for i in range(args.number_of_nets)]
    train_labels = [None for i in range(args.number_of_nets)]
    for j in range(args.number_of_nets):
        for i in range(380):
            for x in range(6):
                ranint = random.randint(0, len(table[x]) - 1)
                train_set_selected[j].append(table[x][ranint])
        train_images[j], train_labels[j] = distorted_inputs(
            train_set_selected, "../train/")

    import get_net
    images = tf.placeholder(
        tf.float32,
        shape=[FLAGS.batch_size, FLAGS.input_size, FLAGS.input_size, 3])
    is_training = tf.placeholder(tf.bool)
    logits = get_net.preprocessing(args, images, 'final_layer', is_training)
    train(args, logits, is_training, images, train_images, train_labels,
          val_set, args.loss_function)
def main(_):
    [train_images, train_expand_images, train_labels], \
        [val_images, val_expand_images, val_labels] = distorted_inputs(
            target_labels=[0, 1, 2, 3, 4], true_labels=[0, 1, 2, 3, 4])
    # print train_images
    is_training = tf.placeholder('bool', [], name='is_training')
    # Switch between the training and validation input pipelines at run time.
    images, expand_images, labels = tf.cond(
        is_training,
        lambda: (train_images, train_expand_images, train_labels),
        lambda: (val_images, val_expand_images, val_labels))
    # Debugging snippet (kept commented out): inspect a batch of labels.
    # with tf.Session() as sess:
    #     tf.train.start_queue_runners(sess=sess)
    #     one_hot_label = tf.one_hot(tf.cast(labels, tf.uint8), depth=5)
    #     # print sess.run(labels, feed_dict={is_training: True})
    #     print np.shape(sess.run(one_hot_label, feed_dict={is_training: True}))
    print(labels)
    logits = inference_small(
        images,
        expand_images,
        phase_names=['NC', 'ART', 'PV'],  # non-contrast, arterial, portal-venous phases
        num_classes=5,
        is_training=True,
    )
    save_model_path = '/home/give/PycharmProjects/MedicalImage/Net/BaseNet/ResNetMultiPhaseExpand/models'
    train(is_training, logits, images, expand_images, labels,
          save_model_path=save_model_path, step_width=50)
def main(_):
    cifar10.maybe_download_and_extract()
    train_set, train_labels = cifar10.prepare_train_data(padding_size=0)
    if tf.gfile.Exists(FLAGS.train_dir):
        tf.gfile.DeleteRecursively(FLAGS.train_dir)
    tf.gfile.MakeDirs(FLAGS.train_dir)
    resnet_train.train(train_set, train_labels)
def main(_):
    images, labels = distorted_inputs()
    is_training = tf.placeholder('bool', [], name='is_training')
    logits = inference(images,
                       num_classes=2,
                       is_training=is_training,
                       bottleneck=True,
                       num_blocks=[3, 4, 6, 3])
    train(is_training, logits, images, labels)
def main(_):
    images, labels = distorted_inputs()
    logits = inference(images,
                       num_classes=1000,
                       is_training=True,
                       bottleneck=False,
                       num_blocks=[2, 2, 2, 2])
    train(logits, images, labels)
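The `num_blocks` / `bottleneck` arguments passed to `inference()` above and in the other scripts follow the standard ResNet block counts. A small reference sketch, assuming `inference()` uses the original paper's configurations (the helper name `RESNET_CONFIGS` is illustrative and not part of the repo):

# Illustrative mapping of ResNet depth to the arguments used with inference().
RESNET_CONFIGS = {
    18:  dict(num_blocks=[2, 2, 2, 2], bottleneck=False),
    34:  dict(num_blocks=[3, 4, 6, 3], bottleneck=False),
    50:  dict(num_blocks=[3, 4, 6, 3], bottleneck=True),   # the "default: resnet-50" case
    101: dict(num_blocks=[3, 4, 23, 3], bottleneck=True),
    152: dict(num_blocks=[3, 8, 36, 3], bottleneck=True),
}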
def main(argv=None):  # pylint: disable=unused-argument
    maybe_download_and_extract()
    if not FLAGS.resume:
        if tf.gfile.Exists(FLAGS.train_dir):
            tf.gfile.DeleteRecursively(FLAGS.train_dir)
        tf.gfile.MakeDirs(FLAGS.train_dir)
    images, labels = distorted_inputs(FLAGS.data_dir, FLAGS.batch_size)
    train(images, labels, small=True)
def main(_):
    images, labels = distorted_inputs(FLAGS.data_dir, FLAGS.train_lst)
    is_training = tf.placeholder('bool', [], name='is_training')  # placeholder for the fusion part
    logits = inference(images,
                       num_classes=FLAGS.num_classes,
                       is_training=is_training,
                       num_blocks=[3, 4, 6, 3])
    train(is_training, logits, images, labels)
def main(_):
    cifar10.maybe_download_and_extract()
    train_set, train_labels = cifar10.prepare_train_data(padding_size=2)
    # do some debugging tests here
    # print(train_set.shape, train_labels.shape)
    # print("==========================================")
    # exit()
    if tf.gfile.Exists(FLAGS.train_dir):
        tf.gfile.DeleteRecursively(FLAGS.train_dir)
    tf.gfile.MakeDirs(FLAGS.train_dir)
    resnet_train.train(train_set, train_labels)
def main(_):
    [train_images, train_labels], [val_images, val_labels] = distorted_inputs()
    print(train_images)
    is_training = tf.placeholder('bool', [], name='is_training')
    images, labels = tf.cond(is_training,
                             lambda: (train_images, train_labels),
                             lambda: (val_images, val_labels))
    logits = inference(
        images,
        num_classes=2,
        is_training=True,
        bottleneck=False,
    )
    save_model_path = '/home/give/PycharmProjects/StomachCanner/classification/Net/ResNet/models/method4'
    train(is_training, logits, images, labels, save_model_path=save_model_path)
def main(argv=None):  # pylint: disable=unused-argument
    maybe_download_and_extract()
    images_train, labels_train = distorted_inputs(FLAGS.data_dir, FLAGS.batch_size)
    images_val, labels_val = inputs(True, FLAGS.data_dir, FLAGS.batch_size)
    is_training = tf.placeholder('bool', [], name='is_training')
    images, labels = tf.cond(is_training,
                             lambda: (images_train, labels_train),
                             lambda: (images_val, labels_val))
    logits = inference_small(images,
                             num_classes=10,
                             is_training=is_training,
                             use_bias=(not FLAGS.use_bn),
                             num_blocks=3)
    train(is_training, logits, images, labels)
def main(_):
    # Benchmark-style setup: synthetic random images and constant labels
    # instead of the real input pipeline (commented out below).
    image_size = 224
    image_shape = [FLAGS.batch_size, image_size + 3, image_size + 3, 3]
    labels = tf.Variable(tf.ones([FLAGS.batch_size], dtype=tf.int32))
    images = tf.Variable(
        tf.random_normal(image_shape, dtype=tf.float32, stddev=1e-1))
    # images, labels = distorted_inputs()
    logits = inference(images,
                       num_classes=1000,
                       is_training=True,
                       bottleneck=True)  # use default: resnet-50
    # num_blocks=[2, 2, 2, 2])
    train(True, logits, images, labels)
def main(_):
    [train_images, train_labels], [val_images, val_labels] = distorted_inputs()
    is_training = tf.placeholder('bool', [], name='is_training')
    images, labels = tf.cond(is_training,
                             lambda: (train_images, train_labels),
                             lambda: (val_images, val_labels))
    # Build one classification head per letter position (multi-task).
    logits_multi_task = []
    for i in range(FLAGS.letter_num_per_vc):
        logits = inference(
            images,
            task_name='task_' + str(i),
            num_classes=FLAGS.max_single_vc_length,
            is_training=True,
            bottleneck=False,
        )
        logits_multi_task.append(logits)
    save_model_path = '/home/give/PycharmProjects/AIChallenger/ResNet/models'
    train(is_training, logits_multi_task, images, labels,
          save_model_path=save_model_path)
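A hedged sketch of how the per-task logits might be combined into a single objective inside `train()`. The label layout (one integer class id per letter position, shape `[batch, FLAGS.letter_num_per_vc]`) and the helper name `multi_task_loss` are assumptions for illustration, not the repo's actual loss:

def multi_task_loss(logits_multi_task, labels):
    # Sketch only: one sparse softmax cross-entropy per head, summed over heads.
    losses = []
    for i, task_logits in enumerate(logits_multi_task):
        ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=labels[:, i], logits=task_logits)
        losses.append(tf.reduce_mean(ce))
    return tf.add_n(losses)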
def main(_):
    print('-----with device: %s' % get_device_str())
    with tf.Graph().as_default(), tf.device(get_device_str()):
        image_size = 224
        # image_shape = [FLAGS.batch_size, image_size + 3, image_size + 3, 3]
        # Keep the synthetic input variables on the CPU.
        with tf.device('/cpu:0'):
            image_shape = [FLAGS.batch_size, image_size, image_size, 3]
            labels = tf.Variable(tf.ones([FLAGS.batch_size], dtype=tf.int32))
            images = tf.Variable(
                tf.random_normal(image_shape, dtype=tf.float32, stddev=1e-1))
            # images, labels = distorted_inputs()
        logits = inference(images,
                           num_classes=1000,
                           is_training=True,
                           bottleneck=True)  # use default: resnet-50
        # num_blocks=[2, 2, 2, 2])
        train(True, logits, images, labels)
def main(argv=None):  # pylint: disable=unused-argument
    maybe_download_and_extract()
    # Different input behaviors for training and testing via separate ops:
    # at training time, distorted_inputs shuffles, distorts, and augments the
    # training set; at test time, inputs just reads the test set normally.
    # An is_training tensor then switches between the two branches.
    images_train, labels_train = distorted_inputs(FLAGS.data_dir, FLAGS.batch_size)
    images_val, labels_val = inputs(True, FLAGS.data_dir, FLAGS.batch_size)
    is_training = tf.placeholder('bool', [], name='is_training')
    images, labels = tf.cond(is_training,
                             lambda: (images_train, labels_train),
                             lambda: (images_val, labels_val))
    logits = inference_small(images,
                             num_classes=10,
                             is_training=is_training,
                             use_bias=(not FLAGS.use_bn),
                             num_blocks=3)
    train(is_training, logits, images, labels)
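The `tf.cond` switch above only takes effect when `is_training` is fed at run time. A minimal sketch of that pattern, assuming the graph from `main` has been built; the session setup is illustrative and not taken from the repo's `train()` internals:

# Illustrative only: feed the boolean placeholder to select the input branch.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    train_batch = sess.run([images, labels], feed_dict={is_training: True})
    val_batch = sess.run([images, labels], feed_dict={is_training: False})
    coord.request_stop()
    coord.join(threads)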
def main(_):
    train_dataset, val_dataset = distorted_inputs()
    train_batch = generate_next_batch(train_dataset, net_config.BATCH_SIZE, None)
    val_batch = generate_next_batch(val_dataset, net_config.BATCH_SIZE, None)
    is_training = tf.placeholder('bool', [], name='is_training')
    image_tensor = tf.placeholder(tf.float32, [
        None, net_config.IMAGE_W, net_config.IMAGE_H, net_config.IMAGE_CHANNEL
    ])
    label_tensor = tf.placeholder(tf.int32, [None])
    logits = inference(
        image_tensor,
        num_classes=2,
        is_training=True,
        bottleneck=False,
    )
    save_model_path = '/home/give/PycharmProjects/StomachCanner/classification/Net/ResNetHeatMap/models/method5-512/1740.0'
    train(is_training, logits, image_tensor, label_tensor, train_batch, val_batch,
          save_model_path=save_model_path)
def main(_):
    images, labels = distorted_inputs()
    train(images, labels)
    # Tail of the download helper (maybe_download_and_extract), kept from the source file.
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    tarfile.open(filepath, 'r:gz').extractall(dest_directory)


def main(argv=None):  # pylint: disable=unused-argument
    maybe_download_and_extract()
    # two tensors that represent each batch
    images_train, labels_train = distorted_inputs(FLAGS.data_dir, FLAGS.batch_size)
    images_val, labels_val = inputs(True, FLAGS.data_dir, FLAGS.batch_size)
    images_test, labels_test = get_test_data(True, FLAGS.data_dir, FLAGS.batch_size)
    is_training = tf.placeholder('bool', [], name='is_training')
    images, labels = tf.cond(is_training,
                             lambda: (images_train, labels_train),
                             lambda: (images_test, labels_test))
    logits = inference_small(images,
                             num_classes=10,
                             is_training=is_training,
                             use_bias=(not FLAGS.use_bn),
                             num_blocks=3)
    train(is_training, logits, images, labels)

# if __name__ == '__main__':
#     tf.app.run()
def main(argv=None):  # pylint: disable=unused-argument
    maybe_download_and_extract()
    images, labels = distorted_inputs(FLAGS.data_dir, FLAGS.batch_size)
    train(images, labels, small=True)
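These scripts share the TensorFlow 1.x entry-point idiom, shown commented out in one of the snippets above. A minimal version, assuming `tensorflow` is already imported as `tf` in the module:

if __name__ == '__main__':
    # tf.app.run() parses FLAGS and then calls main().
    tf.app.run()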