def __init__(self, class_no=14):
    """Configure a _unet instance with its default hyper-parameters.

    Args:
        class_no: number of output segmentation classes (default 14).
    """
    print('create object _unet')
    # Segmentation head size.
    self.class_no = class_no
    # 1x1x1 bottleneck kernel and 3x3x3 main convolution kernel.
    self.kernel_size1 = 1
    self.kernel_size2 = 3
    # Suffix used when composing log/scope names.
    self.log_ext = '_'
    # Fixed seed for reproducible weight initialization.
    self.seed = 200
    # Helper objects for 3-D upsampling and layer construction.
    self.upsampling3d = upsampling()
    self.layers = layers.layers()
def __init__(self, class_no=14):
    """Configure a _unet variant that uses batch normalization and
    strided convolutions (``maxpool`` disabled).

    Args:
        class_no: number of output segmentation classes (default 14).
    """
    print('create object _unet')
    self.class_no = class_no
    # 1x1x1 bottleneck kernel and 3x3x3 main convolution kernel.
    self.kernel_size1 = 1
    self.kernel_size2 = 3
    # Suffix used when composing log/scope names.
    self.log_ext = '_'
    # NOTE(review): sibling constructors store this as ``seed`` — confirm
    # whether ``seed_no`` is read anywhere before unifying the name.
    self.seed_no = 200
    # Helper objects for 3-D upsampling and layer construction.
    self.upsampling3d = upsampling()
    # Downsample via strided convolutions rather than max-pooling.
    self.maxpool = False
    self.norm_method = 'batch_normalization'
    self.layers = layers()
def __init__(self, graph, class_no=14):
    """Configure a _unet bound to an explicit TensorFlow graph.

    Args:
        graph: the tf.Graph the network will be built into.
        class_no: number of output segmentation classes (default 14).
    """
    print('create object _unet')
    # Graph handle kept so layers can be created under the right graph.
    self.graph = graph
    self.class_no = class_no
    # 1x1x1 bottleneck kernel and 3x3x3 main convolution kernel.
    self.kernel_size1 = 1
    self.kernel_size2 = 3
    # Suffix used when composing log/scope names.
    self.log_ext = '_'
    # Fixed seed for reproducible weight initialization.
    self.seed = 200
    # Helper objects for 3-D upsampling and layer construction.
    self.upsampling3d = upsampling()
    self.layers = layers()
    # Variable-scope flags: fresh variables by default, trainable.
    self.reuse = False
    self.trainable = True
def __init__(self, trainable, file_name):
    """Build a _unet whose block weights are preloaded from a checkpoint.

    Reads conv kernels/biases and batch-norm statistics (beta, gamma,
    moving mean, moving variance) for three network blocks out of the
    checkpoint at ``file_name`` and stores them as instance attributes
    for later use as initializers.

    Args:
        trainable: whether the restored weights remain trainable.
        file_name: path to the checkpoint consumed by ``loader``.

    Suffix mapping between attributes and checkpoint scopes:
        *_ld1 <- U_LD_DS1 (first down-sampling block)
        *_ld2 <- U_LD_DS2 (second down-sampling block)
        *_ld3 <- U_LD_US1 (first up-sampling block)
    NOTE(review): the ``ld3``/``US1`` mismatch looks intentional but is
    worth confirming against the checkpoint layout.
    """
    print('create object _unet')
    self.upsampling3d = upsampling()
    self.layers = layers()
    self.trainable = trainable
    # Fixed seed for reproducible weight initialization.
    self.seed = 200
    self.kernel_loader = loader(file_name)
    # Order is significant: the 36 targets below must line up one-to-one
    # with the 36 checkpoint tensor names in the list that follows.
    # Per block: conv1 kernel/bias, conv1 BN beta/gamma/mean/var,
    # then the same six values for conv2.
    [
        self.conv_init1_ld1, self.bias_init1_ld1,
        self.beta_init1_ld1, self.gamma_init1_ld1,
        self.moving_mean_init1_ld1, self.moving_var1_ld1,
        self.conv_init2_ld1, self.bias_init2_ld1,
        self.beta_init2_ld1, self.gamma_init2_ld1,
        self.moving_mean_init2_ld1, self.moving_var2_ld1,
        self.conv_init1_ld2, self.bias_init1_ld2,
        self.beta_init1_ld2, self.gamma_init1_ld2,
        self.moving_mean_init1_ld2, self.moving_var1_ld2,
        self.conv_init2_ld2, self.bias_init2_ld2,
        self.beta_init2_ld2, self.gamma_init2_ld2,
        self.moving_mean_init2_ld2, self.moving_var2_ld2,
        self.conv_init1_ld3, self.bias_init1_ld3,
        self.beta_init1_ld3, self.gamma_init1_ld3,
        self.moving_mean_init1_ld3, self.moving_var1_ld3,
        self.conv_init2_ld3, self.bias_init2_ld3,
        self.beta_init2_ld3, self.gamma_init2_ld3,
        self.moving_mean_init2_ld3, self.moving_var2_ld3
    ] = self.kernel_loader.return_tensor_value_list_by_name([
        # First down-sampling block (DS1).
        'U_LD_DS1/U_LD_DS1U_conv1_conv3d/kernel',
        'U_LD_DS1/U_LD_DS1U_conv1_conv3d/bias',
        'U_LD_DS1/U_LD_DS1U_conv1_bn/beta',
        'U_LD_DS1/U_LD_DS1U_conv1_bn/gamma',
        'U_LD_DS1/U_LD_DS1U_conv1_bn/moving_mean',
        'U_LD_DS1/U_LD_DS1U_conv1_bn/moving_variance',
        'U_LD_DS1/U_LD_DS1U_conv2_conv3d/kernel',
        'U_LD_DS1/U_LD_DS1U_conv2_conv3d/bias',
        'U_LD_DS1/U_LD_DS1U_conv2_bn/beta',
        'U_LD_DS1/U_LD_DS1U_conv2_bn/gamma',
        'U_LD_DS1/U_LD_DS1U_conv2_bn/moving_mean',
        'U_LD_DS1/U_LD_DS1U_conv2_bn/moving_variance',
        # Second down-sampling block (DS2).
        'U_LD_DS2/U_LD_DS2U_conv1_conv3d/kernel',
        'U_LD_DS2/U_LD_DS2U_conv1_conv3d/bias',
        'U_LD_DS2/U_LD_DS2U_conv1_bn/beta',
        'U_LD_DS2/U_LD_DS2U_conv1_bn/gamma',
        'U_LD_DS2/U_LD_DS2U_conv1_bn/moving_mean',
        'U_LD_DS2/U_LD_DS2U_conv1_bn/moving_variance',
        'U_LD_DS2/U_LD_DS2U_conv2_conv3d/kernel',
        'U_LD_DS2/U_LD_DS2U_conv2_conv3d/bias',
        'U_LD_DS2/U_LD_DS2U_conv2_bn/beta',
        'U_LD_DS2/U_LD_DS2U_conv2_bn/gamma',
        'U_LD_DS2/U_LD_DS2U_conv2_bn/moving_mean',
        'U_LD_DS2/U_LD_DS2U_conv2_bn/moving_variance',
        # First up-sampling block (US1) — stored in the *_ld3 attributes.
        'U_LD_US1/U_LD_US1U_conv1_conv3d/kernel',
        'U_LD_US1/U_LD_US1U_conv1_conv3d/bias',
        'U_LD_US1/U_LD_US1U_conv1_bn/beta',
        'U_LD_US1/U_LD_US1U_conv1_bn/gamma',
        'U_LD_US1/U_LD_US1U_conv1_bn/moving_mean',
        'U_LD_US1/U_LD_US1U_conv1_bn/moving_variance',
        'U_LD_US1/U_LD_US1U_conv2_conv3d/kernel',
        'U_LD_US1/U_LD_US1U_conv2_conv3d/bias',
        'U_LD_US1/U_LD_US1U_conv2_bn/beta',
        'U_LD_US1/U_LD_US1U_conv2_bn/gamma',
        'U_LD_US1/U_LD_US1U_conv2_bn/moving_mean',
        'U_LD_US1/U_LD_US1U_conv2_bn/moving_variance',
    ])
# Driver script: build a frozen-weight U-Net, optionally export it as a .pb,
# dump the graph's node names for debugging, then smoke-test one conv3d
# feeding into the U-Net body.
unet = _unet(trainable=False)

if save_pb:
    # Export the frozen inference graph next to the checkpoint.
    pb_dir = path + '/pbs'
    # exist_ok avoids the check-then-create race of the old exists()+makedirs.
    os.makedirs(pb_dir, exist_ok=True)
    freeze_graph(half_unet_graph_chckpnt_dir,
                 pb_dir + '/{}.pb'.format('jpg'),
                 'U_y/U_y_conv3d/bias')

# ========================================================================
# Debug aid: print the name of every node in the default graph.
# ========================================================================
for node in tf.get_default_graph().as_graph_def().node:
    print(node.name)

# ========================================================================
# Smoke test.
# ========================================================================
# Renamed from ``layers`` so the local instance no longer shadows the class.
layer_ops = layers()
X = tf.placeholder(tf.float32, shape=[None, None, None, None, 1],
                   name='synth_img_row1')
# Fixes vs. the original:
#   * feed the placeholder ``X`` — the original passed the builtin ``input``;
#   * ``trainable``/``reuse`` now receive booleans; the original passed the
#     strings 'True'/'False', and any non-empty string (even 'False') is
#     truthy, so ``reuse='False'`` behaved as reuse=True.
conv1 = layer_ops.conv3d(X, filters=10, kernel_size=3, padding='same',
                         dilation_rate=1, is_training=True, trainable=True,
                         scope='conv1', reuse=False)
unet.unet(conv1)