# Accuracy: fraction of predictions whose argmax matches the true class
correct_pred = tf.equal(tf.argmax(model, 1), tf.argmax(outputs, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

cifar = Cifar(batch_size=batch_size)
cifar.create_resized_test_set(dim=n_classes)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    # Report tensor allocations if the GPU runs out of memory
    run_options = tf.RunOptions(report_tensor_allocations_upon_oom=True)

    for epoch in range(no_of_epochs):
        for i in tqdm(range(cifar.no_of_batches), desc="Epoch {}".format(epoch), unit=" batch"):
            this_batch = cifar.batch(i)
            input_batch, out = helper.reshape_batch(this_batch, (image_size, image_size), n_classes)

            # Optimisation step on the current batch
            sess.run([optimizer],
                     feed_dict={pretrained.x: input_batch, outputs: out},
                     options=run_options)

            # Track training accuracy and loss on the same batch
            acc, loss = sess.run([accuracy, cost],
                                 feed_dict={pretrained.x: input_batch, outputs: out})
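# `helper.reshape_batch` is used above but not defined in this snippet. A minimal
# sketch of what it presumably does, assuming each batch is a dict of raw CIFAR
# images and integer labels (the key names and shapes here are assumptions):
import cv2
import numpy as np

def reshape_batch(batch, target_size, n_classes):
    """Resize images to `target_size` and one-hot encode labels (hypothetical helper)."""
    images = np.array([cv2.resize(img, target_size) for img in batch["images"]])
    labels = np.eye(n_classes)[np.array(batch["labels"])]  # int labels -> one-hot rows
    return images, labels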
#outputs = tf.placeholder(tf.float32, [None, n_classes])

# ====================
# config dataset
# ====================
print('Prepare dataset')
train_dataset = Cifar(batch_size=batch_size)

init = tf.global_variables_initializer()

train_features = None
train_label = None

with tf.Session(config=config) as sess:
    sess.run(init)

    for i in tqdm(range(20), unit=" batch"):
        this_batch = train_dataset.batch(i)
        train_X, train_y = helper.reshape_batch(this_batch, (image_size, image_size), n_classes)
        # One-hot labels back to integer class indices
        train_y = [np.argmax(element) for element in train_y]

        # Forward pass through the frozen network to get bottleneck features
        features = sess.run([extractor], feed_dict={model.input_images: train_X})

        if train_features is None:
            train_features = features[0]
            train_label = train_y
        else:
            train_features = np.concatenate((train_features, features[0]), axis=0)
            train_label += train_y
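# With the bottleneck features accumulated, a common next step is to train a
# lightweight classifier on top of them. A minimal sketch using scikit-learn's
# LogisticRegression (this classifier choice is an assumption, not part of the
# original pipeline):
from sklearn.linear_model import LogisticRegression

clf = LogisticRegression(max_iter=1000)
clf.fit(train_features, train_label)  # features: (N, feat_dim), labels: list of ints
print("Train accuracy: {:.4f}".format(clf.score(train_features, train_label)))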