def test_full_net():
    """Test whether an evaluable network can be created."""
    # Import a batch of test images.
    test_data = import_files.import_batch("/Users/admin/Documents/code/python/tensorflow/projects/CIFAR-10-convnet/Data/test/", 1, 10)

    # Weights and biases for one conv layer and two fully connected layers.
    layer1_weights = layer.init_weights(32, (4, 4, 3))
    layer1_biases = layer.init_biases(32)
    layer2_weights = layer.init_weights(128, (8192))
    layer2_biases = layer.init_biases(128)
    layer3_weights = layer.init_weights(10, (128))
    layer3_biases = layer.init_biases(10)

    output = np.empty((10, 10))
    for i in range(10):  # run all 10 imported images through the net
        layer1 = layer.relu(layer.conv_layer(test_data[i], layer1_weights, layer1_biases, zero_pad_dimensions=(2, 2), stride=(2, 2)))
        layer2 = layer.relu(layer.fulcon_layer(layer1, layer2_weights, layer2_biases))
        layer3 = layer.relu(layer.fulcon_layer(layer2, layer3_weights, layer3_biases))
        print(layer3)
        output[i] = layer.softmax(layer3)
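# The test above leans on layer.softmax for the final class probabilities.
# A minimal reference sketch of a numerically stable softmax, assuming
# layer.softmax follows the standard definition (this standalone softmax
# helper is illustrative, not part of the project's layer module):
import numpy as np

def softmax(logits):
    # Subtract the max before exponentiating to avoid overflow.
    shifted = logits - np.max(logits)
    exps = np.exp(shifted)
    return exps / np.sum(exps)

# Example: softmax(np.array([5., 2., 5., 1.])) returns probabilities summing to 1.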
def decoder(self, input_z, name='generate_img', is_training=True):
    hidden_num = 64
    output_dim = 64
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        # Project the latent code up to a 4x4 feature map with 8*hidden_num channels.
        x = ly.fc(input_z, hidden_num * 8 * (output_dim // 16) * (output_dim // 16), name='gen_fc_0')
        x = tf.reshape(x, shape=[self.imle_deep, output_dim // 16, output_dim // 16, hidden_num * 8])  ## 4, 4, 8*64
        x = ly.deconv2d(x, hidden_num * 4, name='g_deconv2d_0')  ## 8, 8, 256
        x = ly.batch_normal(x, name='g_deconv_bn_0', is_training=is_training)
        x = ly.relu(x)
        x = ly.deconv2d(x, hidden_num * 2, name='g_deconv2d_1')  ## 16, 16, 128
        x = ly.batch_normal(x, name='g_deconv_bn_1', is_training=is_training)
        x = ly.relu(x)
        x = ly.deconv2d(x, hidden_num, name='g_deconv2d_2')  ## 32, 32, 64
        x = ly.batch_normal(x, name='g_deconv_bn_2', is_training=is_training)
        x = ly.relu(x)
        x = ly.deconv2d(x, 3, name='g_deconv2d_3')  ## 64, 64, 3
        x = ly.batch_normal(x, name='g_deconv_bn_3', is_training=is_training)
        x = tf.nn.tanh(x)  # map output pixels to [-1, 1]
        return x
def discriminator(self, x, name='discriminator_img', is_training=True):  ## 64,64,3
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        x = ly.conv2d(x, 64, strides=2, use_bias=True, name='d_conv_0')  ## 32,32,64
        x = ly.batch_normal(x, name='d_bn_0', is_training=is_training)
        x = ly.relu(x, 0.2)
        x = ly.conv2d(x, 128, strides=2, use_bias=True, name='d_conv_1')  ## 16,16,128
        x = ly.batch_normal(x, name='d_bn_1', is_training=is_training)
        x = ly.relu(x, 0.2)
        x = ly.conv2d(x, 256, strides=2, use_bias=True, name='d_conv_2')  ## 8,8,256
        x = ly.batch_normal(x, name='d_bn_2', is_training=is_training)
        x = ly.relu(x, 0.2)
        x = ly.conv2d(x, 512, strides=2, use_bias=True, name='d_conv_3')  ## 4,4,512
        x = ly.batch_normal(x, name='d_bn_3', is_training=is_training)
        x = ly.relu(x, 0.2)
        x = ly.fc(x, 1, name='fc_0')
        x = tf.nn.sigmoid(x)  # probability that the input image is real
        return x
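# decoder and discriminator above form a generator/discriminator pair. A minimal
# sketch of how they could be wired together with a standard non-saturating GAN
# loss; the placeholder names (z_ph, real_ph, gan), the latent size, and the
# optimizer settings are illustrative assumptions, not taken from this project.
z_ph = tf.placeholder(tf.float32, [None, 128])           # latent codes (size assumed)
real_ph = tf.placeholder(tf.float32, [None, 64, 64, 3])  # real images scaled to [-1, 1]

fake = gan.decoder(z_ph)
d_real = gan.discriminator(real_ph)
d_fake = gan.discriminator(fake)  # variables shared via tf.AUTO_REUSE

eps = 1e-8  # the discriminator already applies sigmoid, so guard the logs
d_loss = -tf.reduce_mean(tf.log(d_real + eps) + tf.log(1. - d_fake + eps))
g_loss = -tf.reduce_mean(tf.log(d_fake + eps))  # non-saturating generator loss

d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'discriminator_img')
g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'generate_img')
d_step = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(d_loss, var_list=d_vars)
g_step = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(g_loss, var_list=g_vars)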
def old_tests():
    # Error of the final layer for a one-hot target.
    print(backprop.final_layer_error(layer.softmax(np.array([5, 2, 5, 1])), np.array([0, 1, 0, 0]), np.array([5, 2, 5, 1])))
    # Single convolution over a 2x2x3 input with 2-pixel zero padding.
    print(layer.conv_layer(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]),
                           layer.init_weights(5, (2, 2, 3)),
                           layer.init_biases(5),
                           zero_pad_dimensions=(2, 2)))
    # Two stacked convolutions; print only the resulting output shape.
    print(layer.conv_layer(
        layer.relu(
            layer.conv_layer(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]),
                             layer.init_weights(5, (2, 2, 3)),
                             layer.init_biases(5),
                             zero_pad_dimensions=(2, 2))),
        layer.init_weights(32, (2, 2, 5)),
        layer.init_biases(32),
        zero_pad_dimensions=(1, 1)).shape)
    import_files.import_batch("/Users/admin/Documents/code/python/tensorflow/projects/CIFAR-10-convnet/Data/test/", 1, 256)
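# When sanity-checking the shapes printed above, the standard convolution shape
# rule is handy. A small helper sketch, assuming layer.conv_layer follows the
# usual cross-correlation arithmetic; conv_output_shape is a hypothetical name:
def conv_output_shape(in_hw, kernel_hw, pad_hw=(0, 0), stride_hw=(1, 1)):
    # Standard rule, per dimension: out = (in + 2*pad - kernel) // stride + 1.
    return tuple((i + 2 * p - k) // s + 1
                 for i, k, p, s in zip(in_hw, kernel_hw, pad_hw, stride_hw))

# First conv above: 2x2 input, 2x2 kernel, 2-pixel zero padding, stride 1.
print(conv_output_shape((2, 2), (2, 2), pad_hw=(2, 2)))  # -> (5, 5)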
def __init__(self):
    super(CNN_modified, self).__init__()
    self.relu = layer.relu(2)
    self.conv1 = layer.Conv(1, 32, kernel_size=5, padding=2, stride=1)
    self.pool1 = torch.nn.AvgPool2d(kernel_size=2, stride=2)
    self.conv2 = layer.Conv(32, 64, kernel_size=5, padding=2, stride=1)
    self.pool2 = torch.nn.AvgPool2d(kernel_size=2, stride=2)
    self.dense1 = layer.Dense(7 * 7 * 64, 1024)
    self.dense2 = layer.Dense(1024, 10)
    self.precision = 0.
    self.epoch = 0
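# dense1's 7*7*64 input implies 28x28 single-channel inputs: the two stride-2
# average pools halve 28 -> 14 -> 7, while the padded 5x5 convs preserve size.
# A minimal forward sketch under that assumption; this method is hypothetical
# and assumes layer.Conv / layer.Dense behave like torch.nn.Conv2d / Linear:
def forward(self, x):                          # x: (N, 1, 28, 28)
    x = self.pool1(self.relu(self.conv1(x)))   # -> (N, 32, 14, 14)
    x = self.pool2(self.relu(self.conv2(x)))   # -> (N, 64, 7, 7)
    x = x.view(x.size(0), -1)                  # flatten to (N, 7*7*64)
    x = self.relu(self.dense1(x))              # -> (N, 1024)
    return self.dense2(x)                      # -> (N, 10) class logits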
def run(model, train_set, vali_set, test_set):
    for epoch in range(1, 300):
        train_loss = train(model, train_set)
        vali_loss = validation(model, vali_set)
        accuracy = test(model, test_set)
        print("epoch:", epoch, "\ttrain_loss:", train_loss, "\tvali_loss:", vali_loss, "\taccuracy:", accuracy)

lr = 0.01
model = net.model(optimizer.Adam(lr=lr))  # 30 66
#model = net.model(optimizer.GradientDescent(lr=lr))  # ~32% at epoch 30 with 128 samples each in the train/validation/test sets
model.add(nn.conv2d(filters=32, kernel_size=[3, 3], strides=[1, 1], w_init=init.he))
model.add(nn.relu())
model.add(nn.maxpool2d(kernel_size=[2, 2], strides=[2, 2]))
model.add(nn.dropout(0.6))
model.add(nn.conv2d(filters=64, kernel_size=[3, 3], strides=[1, 1], w_init=init.he))
model.add(nn.relu())
model.add(nn.maxpool2d(kernel_size=[2, 2], strides=[2, 2]))
model.add(nn.dropout(0.6))
model.add(nn.conv2d(filters=128, kernel_size=[3, 3], strides=[1, 1], w_init=init.he))
model.add(nn.relu())
model.add(nn.maxpool2d(kernel_size=[2, 2], strides=[2, 2]))
model.add(nn.dropout(0.6))
model.add(nn.flatten())
model.add(nn.affine(out_dim=10, w_init=init.he))
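# run() expects train, validation, and test helpers that this excerpt does not
# define. A minimal sketch of the (model, dataset) interface it relies on; every
# model method used below (train_on_batch, evaluate, predict) is a hypothetical
# placeholder, since the custom net/optimizer framework's API is not shown here:
def train(model, dataset):
    losses = [model.train_on_batch(x, y) for x, y in dataset]  # placeholder API
    return sum(losses) / len(losses)

def validation(model, dataset):
    losses = [model.evaluate(x, y) for x, y in dataset]        # placeholder API
    return sum(losses) / len(losses)

def test(model, dataset):
    correct = sum(model.predict(x).argmax() == y for x, y in dataset)
    return correct / len(dataset)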
def classify(self, d_opt=None, name='classify', is_training=True):  ## 64,64,1
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        x = tf.pad(self.input_img, [[0, 0], [5, 5], [5, 5], [0, 0]], "REFLECT")
        x = ly.conv2d(x, 64, kernal_size=11, name='conv_0', padding='VALID', use_bias=True)
        x = ly.batch_normal(x, name='bn_0', is_training=is_training)
        x = ly.relu(x)
        x = ly.maxpooling2d(x)  ## 32,32,64
        x = tf.pad(x, [[0, 0], [3, 3], [3, 3], [0, 0]], "REFLECT")
        x = ly.conv2d(x, 128, kernal_size=7, name='conv_1', padding='VALID', use_bias=True)
        x = ly.batch_normal(x, name='bn_1', is_training=is_training)
        x = ly.relu(x)
        x = ly.maxpooling2d(x)  ## 16,16,128
        x = tf.pad(x, [[0, 0], [2, 2], [2, 2], [0, 0]], "REFLECT")
        x = ly.conv2d(x, 256, kernal_size=5, name='conv_2', padding='VALID', use_bias=True)
        x = ly.batch_normal(x, name='bn_2', is_training=is_training)
        x = ly.relu(x)
        x = ly.maxpooling2d(x)  ## 8,8,256
        x = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
        x = ly.conv2d(x, 512, kernal_size=3, name='conv_3', padding='VALID', use_bias=True)
        x = ly.batch_normal(x, name='bn_3', is_training=is_training)
        x = ly.relu(x)
        x = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
        x = ly.conv2d(x, 512, kernal_size=3, name='conv_4', padding='VALID', use_bias=True)
        x = ly.batch_normal(x, name='bn_4', is_training=is_training)
        x = ly.relu(x)
        x = ly.maxpooling2d(x)  ## 4,4,512
        x = ly.fc(x, 1024, name='fc_0', use_bias=True)
        x = ly.batch_normal(x, name='bn_5', is_training=is_training)
        x = ly.relu(x)
        x = tf.nn.dropout(x, keep_prob=0.5)
        x = ly.fc(x, self.class_num, name='fc_1', use_bias=True)

        self.pred_x_index = tf.argmax(tf.nn.softmax(x), axis=-1)
        self.pred_x_value = tf.reduce_max(tf.nn.softmax(x), axis=-1)

        if is_training:
            # Cross-entropy loss plus L2 weight decay on the fully connected layers.
            cross_loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.input_label, logits=x),
                axis=0)
            l2_loss = 0.0005 * tf.reduce_sum(
                [tf.nn.l2_loss(var) for var in self.get_single_var('classify/fc')])
            loss = cross_loss + l2_loss
            self.summaries.append(tf.summary.scalar('loss', loss))
            _grad = d_opt.compute_gradients(loss, var_list=self.get_vars('classify'))
            train_op = d_opt.apply_gradients(_grad)
            return train_op
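# A minimal TF1-style driver for classify(), relying only on what the method
# itself reads: the input_img / input_label placeholders and the optimizer
# passed as d_opt. The Model constructor and next_batch loader are hypothetical:
model = Model()
opt = tf.train.AdamOptimizer(1e-4)
train_op = model.classify(d_opt=opt, is_training=True)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        images, labels = next_batch()  # hypothetical batch loader
        sess.run(train_op, feed_dict={model.input_img: images,
                                      model.input_label: labels})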