def runTensorflow(self, images):
    with tf.Session() as sess:
        with tf.device("/cpu:0"):
            # Tensorflow does not know how to release /GPU:0 resources
            # without process termination
            # Issue #1727 in the tensorflow/tensorflow git repository
            # Letting it just use CPU for this forward run instead
            img = tf.placeholder(tf.float32, shape=[1, 224, 224, 3],
                                 name="images")
            model = vgg.VGG()
            model.buildGraph(img, network_version="VGG19")
            images = images.reshape((1, 224, 224, 3))
            sess.run(tf.global_variables_initializer())
            # print self.model.layers.keys()
            self.output = sess.run(
                [model.layers[_] for _ in self.testLayers],
                feed_dict={img: images})
            sess.close()
    return
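# Hedged alternative sketch (not part of the original method): instead of pinning the
# graph to /cpu:0, TF 1.x can be kept off the GPU for a single Session by hiding all
# GPU devices through tf.ConfigProto; everything else about the forward run is unchanged.
cpu_only_config = tf.ConfigProto(device_count={"GPU": 0})
with tf.Session(config=cpu_only_config) as sess:
    sess.run(tf.global_variables_initializer())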
def runGraph(self, im_data):
    with self.test_session() as sess, tf.device("/gpu:0"):
        img = tf.placeholder(tf.float32, im_data.shape, name="images")
        net = vgg.VGG("vgg16test_1")
        net.buildGraph(img, train=False,
                       weightsPath=s.DEF_FRCNN_WEIGHTS_PATH,
                       biasesPath=s.DEF_FRCNN_BIASES_PATH,
                       network_version="VGG16CONV")
        sess.run(tf.global_variables_initializer())
        output = sess.run(net.layers['relu4_1'], feed_dict={img: im_data})
        sess.close()
    # Does output come in list form if only one output is produced? [probably]
    # Blob name is conv4_1, not relu4_1; relu is done in-place by caffe
    return output
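# Hedged note on the question in the comment above: in TF 1.x, sess.run returns a plain
# ndarray when the fetch is a single tensor, and a list only when the fetches are
# themselves passed as a list. Minimal self-contained illustration:
with tf.Session() as demo_sess:
    t = tf.constant([1.0, 2.0])
    single = demo_sess.run(t)    # numpy.ndarray
    listed = demo_sess.run([t])  # list containing one numpy.ndarray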
def runGraph(self):
    with self.test_session() as sess, tf.device("/gpu:0"):
        try:
            conv4_1_in = self.reference_activations['conv4_1']
        except KeyError:
            print("Warning: conv4_1 not found in reference_activations. "
                  "Something wrong with .npz file")
        net = vgg.VGG(namespace="vgg16test_2")
        net.buildGraph(tf.placeholder(dtype=tf.float32), train=False,
                       weightsPath=s.DEF_FRCNN_WEIGHTS_PATH,
                       biasesPath=s.DEF_FRCNN_BIASES_PATH,
                       network_version="VGG16CONV")
        conv4_1 = net.layers['conv4_1']
        sess.run(tf.global_variables_initializer())
        output = sess.run(net.layers['relu5_3'],
                          feed_dict={conv4_1: conv4_1_in})
        sess.close()
    return output
if shuffle:
    np.random.seed(random_seed)
    np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]

# Creating PT data samplers and loaders:
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)

trainloader = dataset.CSILoader(data, opt, sampler=train_sampler)
testloader = dataset.CSILoader(data, opt, sampler=valid_sampler)

print('==> Building model..')
# net = VGG('VGG19')
net = vgg.VGG('VGG11')
# net = ResNet18()
# net = GoogLeNet()
# net = DenseNet121()
# net = ResNeXt29_2x64d()
# net = MobileNet()
# net = DPN92()
# net = ShuffleNetG2()
# net = SENet18()

# result_folder = './results/'
# if not os.path.exists(result_folder):
#     os.makedirs(result_folder)
#
# logname = result_folder + net.__class__.__name__ + '_' + opt.sess + '_' + str(opt.seed) + '.csv'
# dataset_size = len(data)
# indices = list(range(dataset_size))
# split = 0.8
# split = int(np.floor(split * dataset_size))
# print(split)
# if shuffle:
#     np.random.seed(opt.seed)
#     np.random.shuffle(indices)
# train_indices, val_indices = indices[split:], indices[:split]
# train_indices, val_indices = indices[:split], indices[split:]
trainloader = dataset.CSILoader(data_train, opt, shuffle=True)
testloader = dataset.CSILoader(data_test, opt, shuffle=True)

print('==> Building model..')
net = vgg.VGG('VGG11', linear_in=2048)
# net = ResNet18()
# net = LeNet.LeNet(in_channel=3, linear_in=9216)
# net = DenseNet121()
# net = ResNeXt29_2x64d()
# net = MobileNet()
# net = DPN92()
# net = ShuffleNetG2()
# net = SENet18()

# result_folder = './results/'
# if not os.path.exists(result_folder):
#     os.makedirs(result_folder)
#
# logname = result_folder + net.__class__.__name__ + '_' + opt.sess + '_' + str(opt.seed) + '.csv'
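# Hedged sketch reconstructing the commented-out 80/20 split above as runnable code.
# It assumes the full dataset `data`, the argparse options `opt`, and the sampler
# keyword that CSILoader accepts in the earlier fragment; SubsetRandomSampler is the
# standard torch.utils.data sampler.
from torch.utils.data.sampler import SubsetRandomSampler

dataset_size = len(data)
indices = list(range(dataset_size))
split = int(np.floor(0.8 * dataset_size))
np.random.seed(opt.seed)
np.random.shuffle(indices)
train_indices, val_indices = indices[:split], indices[split:]

trainloader = dataset.CSILoader(data, opt, sampler=SubsetRandomSampler(train_indices))
testloader = dataset.CSILoader(data, opt, sampler=SubsetRandomSampler(val_indices))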
def testSave(self):
    # Layers to be tested
    testLayers = ['conv1_1', 'conv2_1', 'conv3_3', 'conv4_3', 'fc6', 'fc7']
    img = tf.placeholder(tf.float32, [1, 224, 224, 3], name="images")
    model = vgg.VGG("frcnn")
    model.buildGraph(img, network_version="VGG16",
                     weightsPath=s.DEF_FRCNN_WEIGHTS_PATH,
                     biasesPath=s.DEF_FRCNN_BIASES_PATH)

    testLayerVariables = []
    for layer in testLayers:
        testLayerVariables.append("frcnn/" + layer + "/Weights:0")
        testLayerVariables.append("frcnn/" + layer + "/Bias:0")

    # Find the weights and biases for several layers in a model
    weightsFn = None
    biasFn = None
    try:
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(testLayerVariables)
            randNum = None
            while True:
                # Try different file names until you find one that isn't taken
                randNum = random.random()
                weightsFn = str(randNum) + "testWeights"
                biasFn = str(randNum) + "testBias"
                if (os.path.isfile("models/" + weightsFn + ".npz")
                        or os.path.isfile("models/" + biasFn + ".npz")):
                    continue
                else:
                    model.save(weightsFn, biasFn)
                    break

        # Load the weights/biases into a new model
        model2 = vgg.VGG()
        model2.buildGraph(img, train=False,
                          weightsPath="models/" + weightsFn + ".npz",
                          biasesPath="models/" + biasFn + ".npz")
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output2 = sess.run(testLayerVariables)
            self.assertEqual(len(output2), 2 * len(testLayers),
                             msg="Incorrect number of output layers")
            # Check to make sure that the values are the same
            for i, var in enumerate(output2):
                np.testing.assert_equal(
                    output[i], output2[i],
                    err_msg="Output number %i was not equal" % i)
    finally:
        if weightsFn is not None and os.path.isfile("models/" + weightsFn + ".npz"):
            os.remove("models/" + weightsFn + ".npz")
        if biasFn is not None and os.path.isfile("models/" + biasFn + ".npz"):
            os.remove("models/" + biasFn + ".npz")
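# Hedged usage sketch of the save/load round trip the test above exercises. The file
# names here are hypothetical; the models/ directory and the .npz suffix follow what
# the test already assumes about vgg.VGG.save.
img = tf.placeholder(tf.float32, [1, 224, 224, 3], name="images")
model = vgg.VGG("frcnn")
model.buildGraph(img, network_version="VGG16",
                 weightsPath=s.DEF_FRCNN_WEIGHTS_PATH,
                 biasesPath=s.DEF_FRCNN_BIASES_PATH)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    model.save("demoWeights", "demoBias")  # -> models/demoWeights.npz, models/demoBias.npz

model2 = vgg.VGG()
model2.buildGraph(img, train=False,
                  weightsPath="models/demoWeights.npz",
                  biasesPath="models/demoBias.npz")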
best_acc = 0
use_cuda = torch.cuda.is_available()

opt = parser.parse_args()
torch.manual_seed(opt.seed)

# Creating data indices for training and validation splits:
data_train = dataset.CSISet(data_x_train, data_y_train)
data_test = dataset.CSISet(data_x_test, data_y_test)

trainloader = dataset.CSILoader(data_train, opt, shuffle=True)
testloader = dataset.CSILoader(data_test, opt, shuffle=True)

print('==> Building model..')
# net = VGG('VGG19')
net = vgg.VGG('VGG11', in_channels=16, num_classes=9, linear_in=1536)
# net = ResNet18()
# net = GoogLeNet()
# net = DenseNet121()
# net = ResNeXt29_2x64d()
# net = MobileNet()
# net = DPN92()
# net = ShuffleNetG2()
# net = SENet18()

# result_folder = './results/'
# if not os.path.exists(result_folder):
#     os.makedirs(result_folder)
#
# logname = result_folder + net.__class__.__name__ + '_' + opt.sess + '_' + str(opt.seed) + '.csv'
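# Hedged sketch of the training step that would typically follow this setup: standard
# cross-entropy loss with SGD. The learning-rate option opt.lr and momentum=0.9 are
# assumptions, not taken from the fragment above, and CSILoader is assumed to yield
# (inputs, targets) batches.
import torch.nn as nn
import torch.optim as optim

if use_cuda:
    net = net.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=opt.lr, momentum=0.9)

net.train()
for inputs, targets in trainloader:
    if use_cuda:
        inputs, targets = inputs.cuda(), targets.cuda()
    optimizer.zero_grad()
    outputs = net(inputs)
    loss = criterion(outputs, targets)
    loss.backward()
    optimizer.step()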