def show_train_accuracies():
    BATCH_SIZE = 512
    train_accuracies = []
    tf.logging.set_verbosity(tf.logging.DEBUG)

    checkpoint_path = tf.train.latest_checkpoint("./log/train")
    print(checkpoint_path)

    images, labels = read_traincsv()
    print(labels.shape)

    # build the inference graph
    images_x = tf.placeholder("float", [None, 28, 28, 1])
    labels_y = tf.placeholder("int64", [None, 10])
    predictions, _ = lenet.lenet(images_x)
    predictions = tf.to_int64(tf.argmax(predictions, 1))
    correct_predict = tf.equal(predictions, tf.argmax(labels_y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_predict, 'float'))

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        saver.restore(sess, checkpoint_path)
        # evaluate the restored model batch by batch over the training set
        for i in range(images.shape[0] // BATCH_SIZE):
            train_accuracy = accuracy.eval(
                feed_dict={
                    images_x: images[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
                    labels_y: labels[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]
                })
            train_accuracies.append(train_accuracy)
        av_train_accuracies = sum(train_accuracies) / len(train_accuracies)
        print("av_train_accuracies %f" % av_train_accuracies)
def save_test():
    BATCH_SIZE = 1
    tf.logging.set_verbosity(tf.logging.DEBUG)

    checkpoint_path = tf.train.latest_checkpoint("./log/train")
    print(checkpoint_path)

    images = read_testcsv()
    print(images.shape)

    # build the inference graph
    images_x = tf.placeholder("float", [None, 28, 28, 1])
    predictions, _ = lenet.lenet(images_x)
    predictions = tf.to_int64(tf.argmax(predictions, 1))
    predicted_labels = np.zeros(images.shape[0])

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        saver.restore(sess, checkpoint_path)
        # predict the test images one batch at a time
        for i in range(images.shape[0]):
            predicted_labels[i * BATCH_SIZE:(i + 1) * BATCH_SIZE] = predictions.eval(
                feed_dict={images_x: images[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]})
        # write a Kaggle-style submission file
        np.savetxt('submission.csv',
                   np.c_[range(1, len(images) + 1), predicted_labels],
                   delimiter=',',
                   header='ImageId,Label',
                   comments='',
                   fmt='%d')
def main(data_num):
    sc = init_nncontext()

    # get data, pre-process and create TFDataset
    (images_data, labels_data) = mnist.read_data_sets("/tmp/mnist", "test")
    images_data = (images_data[:data_num] - mnist.TRAIN_MEAN) / mnist.TRAIN_STD
    labels_data = labels_data[:data_num].astype(np.int32)
    dataset = TFDataset.from_ndarrays((images_data, labels_data), batch_per_thread=20)

    # construct the model from TFDataset
    images, labels = dataset.tensors
    labels = tf.squeeze(labels)
    with slim.arg_scope(lenet.lenet_arg_scope()):
        logits, end_points = lenet.lenet(images, num_classes=10, is_training=False)

    predictions = tf.to_int32(tf.argmax(logits, axis=1))
    correct = tf.expand_dims(tf.to_int32(tf.equal(predictions, labels)), axis=1)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, "/tmp/lenet/model")
        predictor = TFPredictor(sess, [correct])
        accuracy = predictor.predict().mean()
        print("predict accuracy is %s" % accuracy)
def main(args):
    tf.logging.set_verbosity(tf.logging.DEBUG)

    # load the dataset
    dataset = mnist.get_split('test', FLAGS.data_dir)

    # load batch
    images, labels = load_batch(
        dataset,
        FLAGS.batch_size,
        is_training=False)
    print(images, labels)

    # get the model prediction
    predictions, _ = lenet.lenet(images)

    # convert prediction values for each class into single class prediction
    predictions = tf.to_int64(tf.argmax(predictions, 1))

    # streaming metrics to evaluate
    metrics_to_values, metrics_to_updates = metrics.aggregate_metric_map({
        'mse': metrics.streaming_mean_squared_error(predictions, labels),
        'accuracy': metrics.streaming_accuracy(predictions, labels),
        # 'Recall_3': slim.metrics.streaming_recall_at_k(predictions, labels, 3),
    })

    # write the metrics as summaries
    for metric_name, metric_value in metrics_to_values.items():
        summary_name = 'eval/%s' % metric_name
        tf.summary.scalar(summary_name, metric_value)

    # evaluate once on the latest checkpoint in the checkpoint directory
    # (slim.evaluation.evaluation_loop could be used instead to re-evaluate
    # every eval_interval_secs)
    checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
    num_batches = math.ceil(10000 / float(FLAGS.batch_size))

    metric_values = slim.evaluation.evaluate_once(
        master='',
        checkpoint_path=checkpoint_path,
        logdir=FLAGS.log_dir,
        num_evals=num_batches,
        eval_op=list(metrics_to_updates.values()),
        final_op=list(metrics_to_values.values()))

    for metric, value in zip(metrics_to_values.keys(), metric_values):
        print("%s: %f" % (metric, value))
def _build_lenet_model(is_training, images, params):
    with slim.arg_scope(lenet.lenet_arg_scope()):
        out, _ = lenet.lenet(images, num_classes=None, is_training=is_training)
        tf.logging.info("lenet preembedding shape {}".format(
            out.get_shape().as_list()))
        # out = tf.reshape(out, [-1, 1024])
        out = tf.layers.dense(out, params.embedding_size, name="embeddings")
    return out
def main(data_num):
    data_path = '/tmp/mnist' if not args.data_path else args.data_path
    cluster_mode = args.cluster_mode
    if cluster_mode.startswith("yarn"):
        hadoop_conf = os.environ.get("HADOOP_CONF_DIR")
        assert hadoop_conf, "Directory path to hadoop conf not found for yarn-client mode. Please " \
                            "set the environment variable HADOOP_CONF_DIR"
        spark_conf = create_spark_conf().set("spark.executor.memory", "5g") \
            .set("spark.executor.cores", 2) \
            .set("spark.executor.instances", 2) \
            .set("spark.executorEnv.HTTP_PROXY", "http://child-prc.intel.com:913") \
            .set("spark.executorEnv.HTTPS_PROXY", "http://child-prc.intel.com:913") \
            .set("spark.driver.memory", "2g")
        if cluster_mode == "yarn-client":
            sc = init_nncontext(spark_conf, cluster_mode="yarn-client", hadoop_conf=hadoop_conf)
        else:
            sc = init_nncontext(spark_conf, cluster_mode="yarn-cluster", hadoop_conf=hadoop_conf)
    else:
        sc = init_nncontext()

    # get data, pre-process and create TFDataset
    (images_data, labels_data) = mnist.read_data_sets(data_path, "test")
    images_data = (images_data[:data_num] - mnist.TRAIN_MEAN) / mnist.TRAIN_STD
    labels_data = labels_data[:data_num].astype(np.int32)
    dataset = TFDataset.from_ndarrays((images_data, labels_data), batch_per_thread=20)

    # construct the model from TFDataset
    images, labels = dataset.tensors
    labels = tf.squeeze(labels)
    with slim.arg_scope(lenet.lenet_arg_scope()):
        logits, end_points = lenet.lenet(images, num_classes=10, is_training=False)

    predictions = tf.to_int32(tf.argmax(logits, axis=1))
    correct = tf.expand_dims(tf.to_int32(tf.equal(predictions, labels)), axis=1)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, "/tmp/lenet/model")
        predictor = TFPredictor(sess, [correct])
        accuracy = predictor.predict().mean()
        print("predict accuracy is %s" % accuracy)
def lenet_net(image, reuse=tf.AUTO_REUSE, keep_prob=0.5):
    image = tf.reshape(image, [-1, 28, 28, 1])
    with tf.variable_scope(name_or_scope='LeNet', reuse=reuse):
        arg_scope = lenet.lenet_arg_scope()
        with slim.arg_scope(arg_scope):
            logits, end_point = lenet.lenet(image, 10, is_training=True,
                                            dropout_keep_prob=keep_prob)
            probs = tf.nn.softmax(logits)  # probabilities
            return logits, probs, end_point
def main(max_epoch):
    sc = init_orca_context(cores=4, memory="2g")

    # get DataSet
    mnist_train = tfds.load(name="mnist", split="train")
    mnist_test = tfds.load(name="mnist", split="test")

    # Normalizes images
    def normalize_img(data):
        data['image'] = tf.cast(data["image"], tf.float32) / 255.
        return data

    mnist_train = mnist_train.map(
        normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    mnist_test = mnist_test.map(
        normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)

    # tensorflow inputs
    images = tf.placeholder(dtype=tf.float32, shape=(None, 28, 28, 1))
    # tensorflow labels
    labels = tf.placeholder(dtype=tf.int32, shape=(None,))

    with slim.arg_scope(lenet.lenet_arg_scope()):
        logits, end_points = lenet.lenet(images, num_classes=10, is_training=True)

    loss = tf.reduce_mean(
        tf.losses.sparse_softmax_cross_entropy(logits=logits, labels=labels))
    acc = accuracy(logits, labels)

    # create an estimator
    est = Estimator.from_graph(inputs=images,
                               outputs=logits,
                               labels=labels,
                               loss=loss,
                               optimizer=tf.train.AdamOptimizer(),
                               metrics={"acc": acc})
    est.fit(data=mnist_train,
            batch_size=320,
            epochs=max_epoch,
            validation_data=mnist_test)

    result = est.evaluate(mnist_test)
    print(result)

    est.save_tf_checkpoint("/tmp/lenet/model")
    stop_orca_context()
def model_fn(features, labels, mode):
    from nets import lenet
    slim = tf.contrib.slim
    with slim.arg_scope(lenet.lenet_arg_scope()):
        logits, end_points = lenet.lenet(features, num_classes=10, is_training=True)

    if mode == tf.estimator.ModeKeys.EVAL or mode == tf.estimator.ModeKeys.TRAIN:
        loss = tf.reduce_mean(
            tf.losses.sparse_softmax_cross_entropy(logits=logits, labels=labels))
        return TFEstimatorSpec(mode, predictions=logits, loss=loss)
    else:
        return TFEstimatorSpec(mode, predictions=logits)
def main():
    """
    You can also run these commands manually to generate the pb file
    1. git clone https://github.com/tensorflow/models.git
    2. export PYTHONPATH=Path_to_your_model_folder
    3. python lenet.py
    """
    height, width = 32, 32
    inputs = tf.Variable(tf.random_uniform((1, height, width, 3)), name='input')
    inputs = tf.identity(inputs, "input_node")
    net, end_points = lenet.lenet(inputs)
    print("nodes in the graph")
    for n in end_points:
        print(n + " => " + str(end_points[n]))
    net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x),
                      argv[2].split(','))
    run_model(net_outputs, argv[1], 'LeNet', argv[3] == 'True')
def main(max_epoch, data_num):
    sc = init_nncontext()

    # get data, pre-process and create TFDataset
    def get_data_rdd(dataset):
        (images_data, labels_data) = mnist.read_data_sets("/tmp/mnist", dataset)
        image_rdd = sc.parallelize(images_data[:data_num])
        labels_rdd = sc.parallelize(labels_data[:data_num])
        rdd = image_rdd.zip(labels_rdd) \
            .map(lambda rec_tuple: [normalizer(rec_tuple[0], mnist.TRAIN_MEAN, mnist.TRAIN_STD),
                                    np.array(rec_tuple[1])])
        return rdd

    training_rdd = get_data_rdd("train")
    testing_rdd = get_data_rdd("test")
    dataset = TFDataset.from_rdd(training_rdd,
                                 names=["features", "labels"],
                                 shapes=[[28, 28, 1], []],
                                 types=[tf.float32, tf.int32],
                                 batch_size=280,
                                 val_rdd=testing_rdd)

    # construct the model from TFDataset
    images, labels = dataset.tensors
    with slim.arg_scope(lenet.lenet_arg_scope()):
        logits, end_points = lenet.lenet(images, num_classes=10, is_training=True)

    loss = tf.reduce_mean(
        tf.losses.sparse_softmax_cross_entropy(logits=logits, labels=labels))

    # create an optimizer
    optimizer = TFOptimizer(loss, Adam(1e-3),
                            val_outputs=[logits],
                            val_labels=[labels],
                            val_method=Top1Accuracy())
    optimizer.set_train_summary(TrainSummary("/tmp/az_lenet", "lenet"))
    optimizer.set_val_summary(ValidationSummary("/tmp/az_lenet", "lenet"))
    # kick off training
    optimizer.optimize(end_trigger=MaxEpoch(max_epoch))

    saver = tf.train.Saver()
    saver.save(optimizer.sess, "/tmp/lenet/")
def main():
    """
    You can also run these commands manually to generate the pb file
    1. git clone https://github.com/tensorflow/models.git
    2. export PYTHONPATH=Path_to_your_model_folder
    3. python lenet.py
    """
    height, width = 32, 32
    inputs = tf.Variable(tf.random_uniform((1, height, width, 3)), name='input')
    net, end_points = lenet.lenet(inputs)
    print("nodes in the graph")
    for n in end_points:
        print(n + " => " + str(end_points[n]))
    net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x),
                      argv[2].split())
    run_model(net_outputs, argv[1], 'LeNet')
def main(data_num):
    sc = init_nncontext()

    # get data, pre-process and create TFDataset
    (images_data, labels_data) = mnist.read_data_sets("/tmp/mnist", "test")
    image_rdd = sc.parallelize(images_data[:data_num])
    labels_rdd = sc.parallelize(labels_data[:data_num])
    rdd = image_rdd.zip(labels_rdd) \
        .map(lambda rec_tuple: [normalizer(rec_tuple[0], mnist.TRAIN_MEAN, mnist.TRAIN_STD),
                                np.array(rec_tuple[1])])

    dataset = TFDataset.from_rdd(rdd,
                                 names=["features", "labels"],
                                 shapes=[[28, 28, 1], [1]],
                                 types=[tf.float32, tf.int32],
                                 batch_per_thread=20)

    # construct the model from TFDataset
    images, labels = dataset.tensors
    labels = tf.squeeze(labels)
    with slim.arg_scope(lenet.lenet_arg_scope()):
        logits, end_points = lenet.lenet(images, num_classes=10, is_training=False)

    predictions = tf.to_int32(tf.argmax(logits, axis=1))
    correct = tf.expand_dims(tf.to_int32(tf.equal(predictions, labels)), axis=1)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, "/tmp/lenet/model")
        predictor = TFPredictor(sess, [correct])
        accuracy = predictor.predict().mean()
        print("predict accuracy is %s" % accuracy)
def main(max_epoch, data_num):
    sc = init_nncontext()

    # get data, pre-process and create TFDataset
    (train_images_data, train_labels_data) = mnist.read_data_sets("/tmp/mnist", "train")
    (test_images_data, test_labels_data) = mnist.read_data_sets("/tmp/mnist", "test")

    train_images_data = (train_images_data[:data_num] - mnist.TRAIN_MEAN) / mnist.TRAIN_STD
    train_labels_data = train_labels_data[:data_num].astype(np.int)
    test_images_data = (test_images_data[:data_num] - mnist.TRAIN_MEAN) / mnist.TRAIN_STD
    test_labels_data = (test_labels_data[:data_num]).astype(np.int)
    dataset = TFDataset.from_ndarrays(
        (train_images_data, train_labels_data),
        batch_size=360,
        val_tensors=(test_images_data, test_labels_data))

    # construct the model from TFDataset
    images, labels = dataset.tensors
    with slim.arg_scope(lenet.lenet_arg_scope()):
        logits, end_points = lenet.lenet(images, num_classes=10, is_training=True)

    loss = tf.reduce_mean(
        tf.losses.sparse_softmax_cross_entropy(logits=logits, labels=labels))
    acc = accuracy(logits, labels)

    # create an optimizer
    optimizer = TFOptimizer.from_loss(loss, Adam(1e-3),
                                      metrics={"acc": acc},
                                      model_dir="/tmp/lenet/")
    # kick off training
    optimizer.optimize(end_trigger=MaxEpoch(max_epoch))

    saver = tf.train.Saver()
    saver.save(optimizer.sess, "/tmp/lenet/model")
def main():
    sc = init_nncontext()

    # get data, pre-process and create TFDataset
    (images_data, labels_data) = mnist.read_data_sets("/tmp/mnist", "train")
    image_rdd = sc.parallelize(images_data)
    labels_rdd = sc.parallelize(labels_data)
    rdd = image_rdd.zip(labels_rdd) \
        .map(lambda rec_tuple: [normalizer(rec_tuple[0], mnist.TRAIN_MEAN, mnist.TRAIN_STD),
                                np.array(rec_tuple[1])])

    dataset = TFDataset.from_rdd(rdd,
                                 names=["features", "labels"],
                                 shapes=[(None, 28, 28, 1), (None, 1)],
                                 types=[tf.float32, tf.int32])

    # construct the model from TFDataset
    images, labels = dataset.inputs
    labels = tf.squeeze(labels)
    with slim.arg_scope(lenet.lenet_arg_scope()):
        logits, end_points = lenet.lenet(images, num_classes=10, is_training=True)

    loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(logits=logits, labels=labels))

    # create an optimizer
    optimizer = TFOptimizer(loss, Adam(1e-3))
    # kick off training
    # you may change MaxIteration(20) to MaxEpoch(5) to make it converge
    optimizer.optimize(end_trigger=MaxIteration(20), batch_size=280)

    # evaluate
    (images_data, labels_data) = mnist.read_data_sets("/tmp/mnist", "test")
    images_data = normalizer(images_data, mnist.TRAIN_MEAN, mnist.TRAIN_STD)
    predictions = tf.argmax(logits, axis=1)
    predictions_data, loss_value = optimizer.sess.run(
        [predictions, loss],
        feed_dict={images: images_data, labels: labels_data})
    print(np.mean(np.equal(predictions_data, labels_data)))
    print(loss_value)
def main(max_epoch, data_num):
    sc = init_nncontext()

    # get data, pre-process and create TFDataset
    def get_data_rdd(dataset):
        (images_data, labels_data) = mnist.read_data_sets("/tmp/mnist", dataset)
        image_rdd = sc.parallelize(images_data[:data_num])
        labels_rdd = sc.parallelize(labels_data[:data_num])
        rdd = image_rdd.zip(labels_rdd) \
            .map(lambda rec_tuple: [normalizer(rec_tuple[0], mnist.TRAIN_MEAN, mnist.TRAIN_STD),
                                    np.array(rec_tuple[1])])
        return rdd

    training_rdd = get_data_rdd("train")
    testing_rdd = get_data_rdd("test")
    dataset = TFDataset.from_rdd(training_rdd,
                                 features=(tf.float32, [28, 28, 1]),
                                 labels=(tf.int32, []),
                                 batch_size=280,
                                 val_rdd=testing_rdd)

    # construct the model from TFDataset
    images, labels = dataset.tensors
    with slim.arg_scope(lenet.lenet_arg_scope()):
        logits, end_points = lenet.lenet(images, num_classes=10, is_training=True)

    loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(logits=logits, labels=labels))
    acc = accuracy(logits, labels)

    # create an optimizer
    optimizer = TFOptimizer.from_loss(loss, Adam(1e-3),
                                      metrics={"acc": acc},
                                      model_dir="/tmp/lenet/")
    # kick off training
    optimizer.optimize(end_trigger=MaxEpoch(max_epoch))

    saver = tf.train.Saver()
    saver.save(optimizer.sess, "/tmp/lenet/model")
def main():
    sc = init_nncontext()

    # get data, pre-process and create TFDataset
    (images_data, labels_data) = mnist.read_data_sets("/tmp/mnist", "train")
    image_rdd = sc.parallelize(images_data)
    labels_rdd = sc.parallelize(labels_data)
    rdd = image_rdd.zip(labels_rdd) \
        .map(lambda rec_tuple: [normalizer(rec_tuple[0], mnist.TRAIN_MEAN, mnist.TRAIN_STD),
                                np.array(rec_tuple[1])])

    dataset = TFDataset.from_rdd(rdd,
                                 names=["features", "labels"],
                                 shapes=[[28, 28, 1], [1]],
                                 types=[tf.float32, tf.int32],
                                 batch_size=280)

    # construct the model from TFDataset
    images, labels = dataset.tensors
    labels = tf.squeeze(labels)
    with slim.arg_scope(lenet.lenet_arg_scope()):
        logits, end_points = lenet.lenet(images, num_classes=10, is_training=True)

    loss = tf.reduce_mean(
        tf.losses.sparse_softmax_cross_entropy(logits=logits, labels=labels))

    # create an optimizer
    optimizer = TFOptimizer(loss, Adam(1e-3))
    optimizer.set_train_summary(TrainSummary("/tmp/az_lenet", "lenet"))
    # kick off training, one epoch at a time
    for i in range(5):
        optimizer.optimize(end_trigger=MaxEpoch(i + 1))

    saver = tf.train.Saver()
    saver.save(optimizer.sess, "/tmp/lenet/")
def build_train_op(image_tensor, label_tensor, is_training):
    lenet_argscope = lenet_arg_scope(weight_decay=FLAGS.weight_decay)
    global_step = tf.get_variable(name="global_step", shape=[], dtype=tf.int32,
                                  trainable=False)
    with slim.arg_scope(lenet_argscope):
        logits, end_points = lenet(image_tensor, is_training=is_training)

    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label_tensor))
    # note: this is the number of correct predictions in the batch, not a mean
    accuracy = tf.reduce_sum(
        tf.cast(
            tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), label_tensor),
            tf.int32))
    end_points['loss'], end_points['accuracy'] = loss, accuracy

    if is_training:
        optimizer = tf.train.AdadeltaOptimizer(learning_rate=FLAGS.learning_rate)
        train_op = optimizer.minimize(loss, global_step=global_step)
        return train_op, end_points
    else:
        return None, end_points
def classify_image(filepath):
    with tf.Graph().as_default():
        image = open(filepath, 'rb')

        # Open specified url and load image as a string
        image_string = image.read()

        # Decode string into matrix with intensity values
        image = tf.image.decode_png(image_string, channels=3)

        # Resize the input image, preserving the aspect ratio
        # and make a central crop of the resulted image.
        # The crop will be of the size of the default image size of
        # the network.
        processed_image = lenet_preprocessing.preprocess_image(
            image, image_size, image_size, is_training=False)

        # Networks accept images in batches.
        # The first dimension usually represents the batch size.
        # In our case the batch size is one.
        processed_images = tf.expand_dims(processed_image, 0)

        # Create the model, use the default arg scope to configure
        # the batch norm parameters. arg_scope is a very convenient
        # feature of slim library -- you can define default
        # parameters for layers -- like stride, padding etc.
        with slim.arg_scope(lenet.lenet_arg_scope()):
            logits, _ = lenet.lenet(processed_images,
                                    num_classes=11,
                                    is_training=False)

        # In order to get probabilities we apply softmax on the output.
        probabilities = tf.nn.softmax(logits)

        # Create a function that reads the network weights
        # from the checkpoint file that you downloaded.
        # We will run it in session later.
        init_fn = slim.assign_from_checkpoint_fn(
            os.path.join(checkpoints_dir, 'model.ckpt-10000'),
            slim.get_model_variables(None))

        with tf.Session() as sess:
            # Load weights
            init_fn(sess)

            # We want to get predictions, image as numpy matrix
            # and resized and cropped piece that is actually
            # being fed to the network.
            np_image, network_input, probabilities = sess.run(
                [image, processed_image, probabilities])
            probabilities = probabilities[0, 0:]
            sorted_inds = [
                i[0] for i in sorted(enumerate(-probabilities), key=lambda x: x[1])
            ]

        for i in range(11):
            index = sorted_inds[i]
            print('Probability %0.2f => [%s]' % (probabilities[index], names[index]))

        return sorted_inds[0], probabilities
parser = argparse.ArgumentParser(description='PyTorch Classifier Utils!')
parser.add_argument('--datasets', type=str, default="mnist",
                    help="Datasets name.")
parser.add_argument('--dataroot', type=str, default="test_imgs",
                    help="Data folders to categorize.")
parser.add_argument('--img_size', type=int, default=28,
                    help="Size of the input images.")
parser.add_argument('--channels', type=int, default=1,
                    help="Number of channels in the image")
parser.add_argument('--model_path', type=str,
                    help="Load model path.")
opt = parser.parse_args()
print(opt)

# set device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# load model
if opt.datasets == "mnist":
    model = lenet().to(device)
elif opt.datasets == "fmnist":
    model = resnet18().to(device)

# prediction label names
if opt.datasets == "mnist":
    classes_names = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
elif opt.datasets == "fmnist":
    classes_names = ["T-shirt", "Trouser", "Pullover", "Dress", "Coat",
                     "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]
images, labels = tf.train.batch([image, label],
                                batch_size=batch_size,
                                num_threads=1,
                                capacity=5 * batch_size)

coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)

logits, endpoints = lenet.lenet(images)

# convert prediction values for each class into single class prediction
predictions = tf.to_int64(tf.argmax(logits, 1))
labels = tf.squeeze(labels)

saver = tf.train.Saver()
saver.restore(sess, tf.train.latest_checkpoint(train_dir))

preResult = sess.run([predictions, labels])
print(preResult)

# Define the metrics:
# names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
#     'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
#     'Recall_5': slim.metrics.streaming_recall_at_k(logits, labels, 5),
manualSeed = random.randint(1, 10000)
random.seed(manualSeed)
torch.manual_seed(manualSeed)
cudnn.benchmark = True

# setup gpu driver
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Load datasets
train_dataloader, test_dataloader = load_datasets(opt.datasets, opt.dataroot, opt.batch_size)

# Load model
if opt.datasets == "mnist":
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(lenet())
    else:
        model = lenet()
elif opt.datasets == "fmnist" or opt.datasets == "kmnist":
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(resnet18())
    else:
        model = resnet18()
elif opt.datasets == "qmnist":
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(alexnet())
    else:
        model = alexnet()
else:
    model = ""

print(opt)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)

# tf Graph input
X = tf.placeholder("float", [None, n_input, n_input, 3])
# X = tf.placeholder("float", [None, 784])
Y = tf.placeholder("float", [None, n_classes])
alpha_place_holder = tf.placeholder(tf.float32, shape=())
lr_place_holder = tf.placeholder(tf.float32, shape=())

with slim.arg_scope(lenet.lenet_arg_scope()):
    # training and test graphs
    net_output, _ = lenet.lenet(X, embedding_dim=args.embedding_dim, use_bn=args.bn)
    test_net_output, _ = lenet.lenet(X, embedding_dim=args.embedding_dim,
                                     reuse=True, is_training=False, use_bn=args.bn)

# retrieval layer
with tf.variable_scope('retrieval'):
    retrieval_layer = layers.retrieval_layer_2(args.embedding_dim, n_classes)
    out_layer, bottleneck = retrieval_layer.get_output(
        net_output, alpha=alpha_place_holder, l2_norm=args.l2_norm,
        learn_norm=args.learn_norm, norm_weights=args.norm_weights)
    test_out_layer, test_bottleneck = retrieval_layer.get_output(
        test_net_output, alpha=alpha_place_holder, l2_norm=args.l2_norm,
        learn_norm=args.learn_norm, norm_weights=args.norm_weights)
def main(max_epoch, data_num):
    args = parser.parse_args()
    cluster_mode = args.cluster_mode
    if cluster_mode.startswith("yarn"):
        hadoop_conf = os.environ.get("HADOOP_CONF_DIR")
        assert hadoop_conf, "Directory path to hadoop conf not found for yarn-client mode. Please " \
                            "set the environment variable HADOOP_CONF_DIR"
        spark_conf = create_spark_conf().set("spark.executor.memory", "5g") \
            .set("spark.executor.cores", 2) \
            .set("spark.executor.instances", 2) \
            .set("spark.driver.memory", "2g")
        if cluster_mode == "yarn-client":
            sc = init_nncontext(spark_conf, cluster_mode="yarn-client", hadoop_conf=hadoop_conf)
        else:
            sc = init_nncontext(spark_conf, cluster_mode="yarn-cluster", hadoop_conf=hadoop_conf)
    else:
        sc = init_nncontext()

    # get data, pre-process and create TFDataset
    (train_images_data, train_labels_data) = mnist.read_data_sets("/tmp/mnist", "train")
    (test_images_data, test_labels_data) = mnist.read_data_sets("/tmp/mnist", "test")

    train_images_data = (train_images_data[:data_num] - mnist.TRAIN_MEAN) / mnist.TRAIN_STD
    train_labels_data = train_labels_data[:data_num].astype(np.int)
    test_images_data = (test_images_data[:data_num] - mnist.TRAIN_MEAN) / mnist.TRAIN_STD
    test_labels_data = (test_labels_data[:data_num]).astype(np.int)
    dataset = TFDataset.from_ndarrays(
        (train_images_data, train_labels_data),
        batch_size=360,
        val_tensors=(test_images_data, test_labels_data))

    # construct the model from TFDataset
    images, labels = dataset.tensors
    with slim.arg_scope(lenet.lenet_arg_scope()):
        logits, end_points = lenet.lenet(images, num_classes=10, is_training=True)

    loss = tf.reduce_mean(
        tf.losses.sparse_softmax_cross_entropy(logits=logits, labels=labels))
    acc = accuracy(logits, labels)

    # create an optimizer
    optimizer = TFOptimizer.from_loss(loss, Adam(1e-3),
                                      metrics={"acc": acc},
                                      model_dir="/tmp/lenet/")
    # kick off training
    optimizer.optimize(end_trigger=MaxEpoch(max_epoch))

    saver = tf.train.Saver()
    saver.save(optimizer.sess, "/tmp/lenet/model")