def main():
    # Load the VGG-based style/content feature extractor.
    extractor = utils.StyleContentModel(style_layers, content_layers)

    # Accumulate style features over the whole image set.
    style_features_avg = [0.0] * len(style_layers)
    for i in range(1, N_IMG):
        if clinical:
            image_path = 'img/clinical_us/training_set/' + format(i, '03d') + '_HC.png'
            style_image = utils.image_preprocessing(image_path, 'clinical', [540, 800], c=3)
        else:
            image_path = 'img/data/new_att_all/' + str(i) + '.png'
            style_image = utils.image_preprocessing(image_path, 'hq', [1000, 1386], c=3)
        print(image_path)

        style_features = extractor(style_image)['style']
        style_features_list = [style_features[name] for name in style_features.keys()]
        for j in range(len(style_layers)):  # was a hard-coded range(5)
            style_features_avg[j] += style_features_list[j]

    # Average over the N_IMG - 1 processed images.
    style_features_avg = [ft / (N_IMG - 1) for ft in style_features_avg]
    style_dict = {name: value for name, value in zip(style_layers, style_features_avg)}

    if clinical:
        filename = 'models/nst/us_clinical_ft_dict.pickle'
    else:
        filename = 'models/nst/us_hq_ft_dict.pickle'
    with open(filename, 'wb') as handle:
        pickle.dump(style_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
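# Hedged usage sketch (not part of the original file): how a consumer might
# reload the averaged style-feature dictionary pickled by main(). The paths
# mirror the ones above; the helper name is an illustrative assumption.
def load_style_targets(clinical=False):
    filename = ('models/nst/us_clinical_ft_dict.pickle' if clinical
                else 'models/nst/us_hq_ft_dict.pickle')
    with open(filename, 'rb') as handle:
        return pickle.load(handle)  # {layer_name: averaged style feature}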
def __init__(self, source_image, observed_layers, n_bins=128):
    """
    :param source_image: source image
    :type source_image: PIL Image object
    :param observed_layers: dictionary containing layer-specific information;
        see bottom of file decoders.py
    :type observed_layers: dictionary
    :param n_bins: number of transportation histogram bins, defaults to 128
        [TO BE IMPLEMENTED]
    :type n_bins: int, optional
    """
    # source image
    self.source_tensor = image_preprocessing(source_image)
    self.normalized_source_batch = vgg_normalization(self.source_tensor).unsqueeze(0)
    self.source_batch = self.source_tensor.unsqueeze(0)

    # set encoder: pretrained VGG-19 in eval mode with frozen weights
    self.encoder = vgg19(pretrained=True).float()
    self.encoder.eval()
    for param in self.encoder.features.parameters():
        param.requires_grad = False

    self.encoder_layers = {}
    self.set_encoder_hooks(observed_layers)

    self.n_bins = n_bins
def load_image_and_label(data, hist_template, total_files_to_read, ind, folderPath):
    label_entry = data
    filename = os.path.join(folderPath, label_entry[0])
    try:
        # im = ndimage.imread(filename, True, 'L')
        im = cv2.imread(filename, cv2.IMREAD_GRAYSCALE).astype(np.float32)
        im = hist_match(im, hist_template)
        im = misc.imresize(im, (conf.IMAGE_X_DIM, conf.IMAGE_Y_DIM), 'bilinear')
        im = image_preprocessing(im)
        im = im.reshape(conf.IMAGE_X_DIM, conf.IMAGE_Y_DIM, 1)
        printProgressBar(ind + 1, total_files_to_read,
                         prefix='Loaded files:', suffix='Complete', length=50)
        return im
    except (FileNotFoundError, OSError, AttributeError):
        print('Missing {0}'.format(str(filename)))
    except ValueError as e:
        print('Value error: ' + str(e))
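# Hedged sketch: `hist_match` is defined elsewhere in the repo, so this is an
# assumed reference implementation of histogram matching (quantile mapping
# with numpy), not the project's own code.
import numpy as np

def hist_match_sketch(source, template):
    src_values, src_idx, src_counts = np.unique(source.ravel(),
                                                return_inverse=True,
                                                return_counts=True)
    tmpl_values, tmpl_counts = np.unique(template.ravel(), return_counts=True)
    # Empirical CDFs of both images, normalized to [0, 1].
    src_cdf = np.cumsum(src_counts).astype(np.float64) / source.size
    tmpl_cdf = np.cumsum(tmpl_counts).astype(np.float64) / template.size
    # Map each source quantile onto the template intensity at that quantile.
    matched = np.interp(src_cdf, tmpl_cdf, tmpl_values)
    return matched[src_idx].reshape(source.shape)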
def __init__(self, style_image, content_image, observed_layers,
             n_bins=128, decoder_weights_path=None):
    super().__init__(style_image, observed_layers, n_bins=n_bins)

    # input image
    self.content_tensor = image_preprocessing(content_image)
    self.normalized_content_batch = vgg_normalization(self.content_tensor).unsqueeze(0)
    self.content_batch = self.content_tensor.unsqueeze(0)
def __init__(self, model_path, factor, iter, lr, tv_coeff, tv_beta,
             l1_coeff, img_path, perturb):
    self.model_path = model_path
    self.factor = factor
    self.iter = iter  # note: shadows the built-in `iter` inside this scope
    self.lr = lr
    self.tv_coeff = tv_coeff
    self.tv_beta = tv_beta
    self.l1_coeff = l1_coeff
    self.img_path = img_path

    self.model = load_model(self.model_path)

    # original image and a perturbed copy, both as preprocessed tensors
    self.original_img = perturbation(self.img_path, 'original', None)
    self.original_img_tensor = image_preprocessing(self.original_img)
    self.perturbed_img = perturbation(self.img_path, perturb, 5)
    self.perturbed_img_tensor = image_preprocessing(self.perturbed_img)
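# Hedged sketch (assumption): the tv_coeff/tv_beta pair suggests the
# total-variation regularizer used in mask-optimization saliency methods.
# The project's actual loss lives elsewhere; this only illustrates the
# usual formulation of a beta-powered TV norm on a 2-D mask.
import torch

def tv_norm(mask, tv_beta):
    # Mean of beta-powered differences between neighboring mask pixels.
    row_grad = torch.mean(torch.abs(mask[:-1, :] - mask[1:, :]).pow(tv_beta))
    col_grad = torch.mean(torch.abs(mask[:, :-1] - mask[:, 1:]).pow(tv_beta))
    return row_grad + col_grad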
def __init__(self, style_image, content_image, observed_layers, n_bins=128):
    """
    :param content_image: content image
    :type content_image: PIL Image object
    """
    super().__init__(style_image, observed_layers, n_bins=n_bins)

    # input image
    self.content_tensor = image_preprocessing(content_image)
    self.normalized_content_batch = vgg_normalization(self.content_tensor).unsqueeze(0)
    self.content_batch = self.content_tensor.unsqueeze(0)
def __init__(self, image, observed_layers, n_bins=128):
    # source image
    self.source_tensor = image_preprocessing(image)
    self.normalized_source_batch = vgg_normalization(self.source_tensor).unsqueeze(0)
    self.source_batch = self.source_tensor.unsqueeze(0)

    # set encoder: pretrained VGG-19 in eval mode with frozen weights
    self.encoder = vgg19(pretrained=True).float()
    self.encoder.eval()
    for param in self.encoder.features.parameters():
        param.requires_grad = False

    self.encoder_layers = {}
    self.set_encoder_hooks(observed_layers)

    self.n_bins = n_bins
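# Hedged sketch (assumption): `set_encoder_hooks` is defined elsewhere. A
# typical implementation registers forward hooks on the observed VGG-19
# layers so their activations land in self.encoder_layers on each forward
# pass; the 'index' key into observed_layers is hypothetical.
def set_encoder_hooks_sketch(self, observed_layers):
    for name, layer_info in observed_layers.items():
        layer = self.encoder.features[layer_info['index']]  # hypothetical key

        def make_hook(layer_name):
            def hook(module, inputs, output):
                self.encoder_layers[layer_name] = output
            return hook

        layer.register_forward_hook(make_hook(name))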
def __getImagesAndLabels(self, data_size, pos_locations, neg_locations):
    assert len(pos_locations) > 0
    assert len(neg_locations) > 0

    # Reference image whose histogram every loaded image is matched to.
    filename = os.path.join(self.__imagesFolderPath, "00000003_006.png")
    # hist_template = ndimage.imread(filename, True, 'L')  # scipy API removed
    hist_template = cv2.imread(filename, cv2.IMREAD_GRAYSCALE).astype(np.float32)

    if data_size is None:
        pos_indexes = range(0, len(pos_locations))
        neg_indexes = range(0, len(neg_locations))
    else:
        # Note: sampling starts at index 1, so entry 0 is never drawn here.
        pos_indexes = random.sample(range(1, len(pos_locations)),
                                    int(data_size * self.__classSplit))
        neg_indexes = random.sample(range(1, len(neg_locations)),
                                    int(data_size * (1. - self.__classSplit)))

    pos_locations = [pos_locations[ind] for ind in pos_indexes]
    neg_locations = [neg_locations[ind] for ind in neg_indexes]

    images = []
    labels = []
    total_files_to_read = len(pos_locations) + len(neg_locations)

    for ind, label_entry in enumerate(pos_locations):
        filename = os.path.join(self.__imagesFolderPath, label_entry[0])
        try:
            # im = ndimage.imread(filename, True, 'L')
            im = cv2.imread(filename, cv2.IMREAD_GRAYSCALE).astype(np.float32)
            im = hist_match(im, hist_template)
            im = misc.imresize(im, (conf.IMAGE_X_DIM, conf.IMAGE_Y_DIM), 'bilinear')
            im = image_preprocessing(im)
            im = im.reshape(conf.IMAGE_X_DIM, conf.IMAGE_Y_DIM, 1)
            images.append(im)
            labels.append(self.__encode_chestx_label(label_entry[1],
                                                     conf.POSITIVE_LABELS,
                                                     conf.NEGATIVE_LABELS))
            printProgressBar(ind + 1, total_files_to_read,
                             prefix='Loaded files:', suffix='Complete', length=50)
        except (FileNotFoundError, OSError, AttributeError):
            print('Missing {0}'.format(str(filename)))
        except ValueError as e:
            print('Value error: ' + str(e))

    for ind, label_entry in enumerate(neg_locations):
        filename = os.path.join(self.__imagesFolderPath, label_entry[0])
        try:
            # im = ndimage.imread(filename, True, 'L')
            im = cv2.imread(filename, cv2.IMREAD_GRAYSCALE).astype(np.float32)
            im = hist_match(im, hist_template)
            im = misc.imresize(im, (conf.IMAGE_X_DIM, conf.IMAGE_Y_DIM), 'bilinear')
            im = image_preprocessing(im)
            im = im.reshape(conf.IMAGE_X_DIM, conf.IMAGE_Y_DIM, 1)
            images.append(im)
            labels.append(0)  # negatives are always class 0
            printProgressBar(len(pos_locations) + ind + 1, total_files_to_read,
                             prefix='Loaded files:', suffix='Complete', length=50)
        except (FileNotFoundError, OSError, AttributeError):
            print('Missing {0}'.format(str(filename)))
        except ValueError as e:
            print('Value error: ' + str(e))

    # locations = pos_locations + neg_locations
    images, labels = shuffle(images, labels)

    x = np.asarray(images)
    x = x.reshape(-1, conf.IMAGE_X_DIM, conf.IMAGE_Y_DIM, 1).astype('float32')
    y = np.asarray(labels)
    y = to_categorical(y, conf.NUM_CLASSES)
    return x, y
def __init__(self, params, variables):
    """
    Initializes all necessary components of the TensorFlow Graph.
    """
    # Assign required variables first
    self.varsM = variables
    '''
    https://github.com/mbrufau7/tfm_food_segm/blob/master/W_net_Unsupervised_%26_Centroid_Loss_1_2.ipynb
    '''
    # INITIALIZE GRAPH
    self.graph = tf.Graph()
    with self.graph.as_default():
        self.n_input_variables = len(self.varsM)

        # Placeholders
        # self.input_images = tf.placeholder(
        #     tf.float32, shape=(None, None, None, None, self.n_input_variables),
        #     name='input_images')
        self.input_images = tf.placeholder(
            tf.float32, shape=(None, None, self.n_input_variables),
            name='input_images')
        self.z_voxels = tf.placeholder(tf.int32, name='z_voxels')
        self.y_voxels = tf.placeholder(tf.int32, name='y_voxels')
        self.x_voxels = tf.placeholder(tf.int32, name='x_voxels')
        self.phase = tf.placeholder(tf.bool, name='phase')
        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')

        params_z_proc = params.image_params.CROP_PAD_IMAGE_Z
        params_y_proc = params.image_params.CROP_PAD_IMAGE_Y
        params_x_proc = params.image_params.CROP_PAD_IMAGE_X

        # shape = tf.shape(self.input_images)
        self.org_x = self.x_voxels  # shape[3]
        self.org_y = self.y_voxels  # shape[2]
        self.org_z = self.z_voxels  # shape[1]

        self.input_processed = image_preprocessing(
            self.input_images, params_z_proc, params_y_proc, params_x_proc,
            self.n_input_variables, self.org_z, self.org_y, self.org_x)

        # Global step - feed it in so no incrementing necessary
        # self.global_step_m = tf.placeholder(tf.int32)
        global_step = tf.Variable(0.0, trainable=False)

        def shape_so(tensor):
            # s = tensor.get_shape()
            # return tuple([s[i].value for i in range(0, len(s))])
            return tuple([d.value for d in tensor.get_shape()])

        def conv_block(inputs, filters, prev_filters, kernel, activation,
                       phase, dil_rate, tag=None):
            # @TODO 'channels_last' is default? is this actually a separable conv
            net_2 = tf.layers.conv3d(
                inputs, filters, kernel, strides=[1, 1, 1],
                dilation_rate=dil_rate, padding='SAME', activation=activation,
                kernel_initializer=keras.initializers.he_normal(),
                data_format='channels_last', name=name_tag('conv3d_1', tag))
            # net_2 = tf.layers.max_pooling3d(net_2, pool_size=kernel,
            #     strides=[1, 1, 1], padding='SAME', data_format='channels_last',
            #     name=name_tag('max_pooling3d', tag))
            net_2 = tf.layers.conv3d(
                net_2, filters, kernel, strides=[1, 1, 1],
                dilation_rate=[1, 1, 1], padding='SAME', activation=activation,
                kernel_initializer=keras.initializers.he_normal(),
                data_format='channels_last', name=name_tag('conv3d_2', tag))
            net_2 = tf.layers.max_pooling3d(
                net_2, pool_size=kernel, strides=[2, 2, 2], padding='SAME',
                data_format='channels_last', name=name_tag('max_pooling3d', tag))
            # net_2 = tf.layers.batch_normalization(net_2, center=True, scale=True,
            #     training=phase, name=name_tag('batch_norm', tag))
            # net_2 = tf.layers.dropout(net_2, rate=0.20, training=phase,
            #     name=name_tag('dropout', tag))
            return net_2

        def deconv_block(inputs, filters, prev_filters, kernel, activation,
                         phase, dil_rate, tag=None):
            '''
            @TODO 'channels_last' is default? is this actually a separable conv.
            W-Net : https://arxiv.org/pdf/1711.08506.pdf
            U-Net : https://arxiv.org/pdf/1505.04597.pdf

            U-ENC for W-Net should be:
            depthwise separable conv -> depthwise separable conv -> dconv

            One important modification in our architecture is that all of the
            modules use the depthwise separable convolution layers introduced
            in U-Net except modules 1, 9, 10, and 18. A depthwise separable
            convolution operation consists of a depthwise convolution and a
            pointwise convolution. The idea behind such an operation is to
            examine spatial correlations and cross-channel correlations
            independently: a depthwise convolution performs spatial
            convolutions independently over each channel, and then a pointwise
            convolution projects the feature channels produced by the
            depthwise convolution onto a new channel space. As a consequence,
            the network gains performance more efficiently with the same
            number of parameters.
            '''
            # net_3 = tf.layers.conv3d(inputs, filters, kernel, strides=[1, 1, 1],
            #     dilation_rate=[1, 1, 1], padding='SAME', activation=activation,
            #     kernel_initializer=keras.initializers.he_normal(),
            #     data_format='channels_last')
            net_3 = tf.layers.conv3d_transpose(
                inputs, filters, kernel, strides=[2, 2, 2], padding='SAME',
                activation=activation,
                kernel_initializer=keras.initializers.he_normal(),
                data_format='channels_last', name=name_tag('dconv3d', tag))
            # net_3 = tf.layers.max_pooling3d(net_3, pool_size=kernel,
            #     strides=[1, 1, 1], padding='SAME', data_format='channels_last')
            net_3 = tf.layers.conv3d(
                net_3, filters, kernel, strides=[1, 1, 1], padding='SAME',
                activation=activation,
                kernel_initializer=keras.initializers.he_normal(),
                data_format='channels_last',  # @TODO: conv or separable conv?
                name=name_tag('conv3d', tag))
            net_3 = tf.layers.max_pooling3d(
                net_3, pool_size=kernel, strides=[1, 1, 1], padding='SAME',
                data_format='channels_last', name=name_tag('max_pooling3d', tag))
            # net_3 = tf.layers.batch_normalization(net_3, center=True, scale=True, training=phase)
            # net_3 = tf.layers.dropout(net_3, rate=0.20, training=phase)
            return net_3

        def middle_block(inputs, filters, prev_filters, kernel, activation,
                         phase, tag=None):
            net_1 = tf.layers.conv3d(
                inputs, filters, kernel, strides=[1, 1, 1],
                dilation_rate=[1, 1, 1], padding='SAME', activation=activation,
                kernel_initializer=keras.initializers.lecun_normal(),
                data_format='channels_last', name=name_tag('conv3d_1', tag))
            # net_1 = tf.layers.max_pooling3d(net_1, pool_size=kernel,
            #     strides=[1, 1, 1], padding='SAME', data_format='channels_last',
            #     name=name_tag('max_pooling3d', tag))
            net_1 = tf.layers.conv3d(
                net_1, filters, kernel, strides=[1, 1, 1],
                dilation_rate=[1, 1, 1], padding='SAME', activation=activation,
                kernel_initializer=keras.initializers.he_normal(),
                data_format='channels_last', name=name_tag('conv3d_2', tag))
            net_1 = tf.layers.max_pooling3d(
                net_1, pool_size=kernel, strides=[1, 1, 1], padding='SAME',
                name=name_tag('max_pooling3d', tag))
            # net_1 = tf.layers.batch_normalization(net_1, center=True, scale=True, training=phase)
            # net_1 = tf.layers.dropout(net_1, rate=0.20, training=phase)
            return net_1

        def middle_block_fc(inputs, filters, prev_filters, kernel, activation,
                            phase, tag=None):
            net_1 = tf.layers.conv3d(
                inputs, filters, kernel, strides=[1, 1, 1], padding='SAME',
                activation=activation,
                kernel_initializer=keras.initializers.he_normal(),
                name=name_tag('conv3d_1', tag))
            net_1 = tf.layers.flatten(net_1)
            # @TODO refactor hardcoded dimensions? 6 * 12 * 6 = 432
            net_1 = tf.layers.dense(
                net_1, 432, activation=activation,
                kernel_initializer=keras.initializers.lecun_normal(),
                name=name_tag('fc', tag))
            # @TODO refactor hardcoded dimensions? 6, 12, 6 = 432
            net_1 = tf.reshape(net_1, shape=(-1, 6, 12, 6, 1),
                               name=name_tag('reshape', tag))
            net_1 = tf.layers.conv3d(
                net_1, prev_filters, kernel, strides=[1, 1, 1], padding='SAME',
                activation=activation,
                kernel_initializer=keras.initializers.he_normal(),
                name=name_tag('conv3d_2', tag))
            # net_1 = tf.layers.batch_normalization(net_1, center=True, scale=True,
            #     training=phase, name=name_tag('bn', tag))
            # net_1 = tf.layers.dropout(net_1, rate=0.20, training=phase,
            #     name=name_tag('dropout', tag))
            return net_1

        def wnet(inputs, z, y, x, phase, keep_prob, params, n_input_variables):
            # encoder
            with tf.name_scope("U-encoder") as scope:
                print('inputs {}', shape_so(inputs))
                net_e1_1 = conv_block(
                    inputs,
                    filters=params.graph_params.LAYER_1,
                    prev_filters=params.graph_params.LAYER_1,
                    kernel=params.graph_params.KERNEL1,
                    activation=tf.nn.leaky_relu, phase=phase,
                    dil_rate=[1, 1, 1], tag='uenc_conv_block_1')
                print('e1_1 {}', shape_so(net_e1_1))

                net_e2_1 = conv_block(
                    net_e1_1,
                    filters=params.graph_params.LAYER_2,
                    prev_filters=params.graph_params.LAYER_2,
                    kernel=params.graph_params.KERNEL1,
                    activation=tf.nn.leaky_relu, phase=phase,
                    dil_rate=[1, 1, 1], tag='uenc_conv_block_2')
                # print('e2_1 {}', shape_so(net_e2_1))

                net_e3_1 = conv_block(
                    net_e2_1,
                    filters=params.graph_params.LAYER_3,
                    prev_filters=params.graph_params.LAYER_3,
                    kernel=params.graph_params.KERNEL2,
                    activation=tf.nn.leaky_relu, phase=phase,
                    dil_rate=[1, 1, 1], tag='uenc_conv_block_3')
                print('e3_1 {}', shape_so(net_e3_1))

                ## middle layer
                # inputs, filters, prev_filters, kernel, activation, phase
                net_m1_1 = middle_block(
                    net_e3_1,
                    filters=params.graph_params.LAYER_4,
                    prev_filters=params.graph_params.LAYER_3,
                    kernel=params.graph_params.KERNEL2,
                    activation=tf.nn.leaky_relu, phase=phase,
                    tag='uenc_conv_middle_block_4')
                print('m1_1 {}', shape_so(net_m1_1))

                # net_c3_1 = tf.concat([net_m1_1, net_e3_1], axis=-1)
                # inputs, filters, prev_filters, kernel, activation, phase, dil_rate
                net_d3_1 = deconv_block(
                    net_m1_1,
                    filters=params.graph_params.LAYER_3,
                    prev_filters=params.graph_params.LAYER_2,
                    kernel=params.graph_params.KERNEL2,
                    activation=tf.nn.leaky_relu, phase=phase,
                    dil_rate=[1, 1, 1], tag='uenc_dconv_block_5')

                # net_c2_1 = tf.concat([net_d3_1, net_e2_1], axis=-1)
                net_d2_1 = deconv_block(
                    net_d3_1,
                    filters=params.graph_params.LAYER_2,
                    prev_filters=params.graph_params.LAYER_1,
                    kernel=params.graph_params.KERNEL1,
                    activation=tf.nn.leaky_relu, phase=phase,
                    dil_rate=[1, 1, 1], tag='uenc_dconv_block_6')

                # net_c1_1 = tf.concat([net_d2_1, net_e1_1], axis=-1)
                net_d1_1 = deconv_block(
                    net_d2_1, params.graph_params.LAYER_1,
                    params.graph_params.LAYER_1, params.graph_params.KERNEL1,
                    tf.nn.leaky_relu, phase, [1, 1, 1],
                    tag='uenc_dconv_block_7')
                # net_d1_1 = tf.layers.batch_normalization(net_d1_1, center=True,
                #     scale=True, training=phase, momentum=0.90)

                # final layer for first U
                # net_c0_1 = tf.concat([net_d1_1, net_a], axis=-1)
                net_feed = tf.layers.conv3d(
                    net_d1_1, params.graph_params.N_CLASSES,
                    params.graph_params.KERNEL2, dilation_rate=[1, 1, 1],
                    strides=[1, 1, 1], padding='SAME', activation=tf.nn.softmax,
                    kernel_initializer=keras.initializers.he_normal(),
                    data_format='channels_last',
                    name=name_tag('conv3d_final_layer', 'uenc_conv_block_7'))
                # net_feed = tf.nn.softmax(net_feed, axis=4)

            # decoder
            with tf.name_scope("U-decoder"):
                net_d_1 = tf.layers.conv3d(
                    net_feed, params.graph_params.LAYER_1,
                    params.graph_params.KERNEL1, dilation_rate=[1, 1, 1],
                    strides=[1, 1, 1], padding='SAME',
                    activation=tf.nn.leaky_relu,
                    name=name_tag('conv3d_first_layer', 'udec_conv_block_1'))

                # inputs, filters, prev_filters, kernel, activation, phase, dil_rate, tag=None
                net_de1_1 = conv_block(
                    net_d_1,
                    filters=params.graph_params.LAYER_1,
                    prev_filters=params.graph_params.LAYER_1,
                    kernel=params.graph_params.KERNEL1,
                    activation=tf.nn.leaky_relu, phase=phase,
                    dil_rate=[1, 1, 1], tag='udec_conv_block_1')

                # net_bridge1 = tf.concat([net_de1_1, net_e1_1], axis=-1)
                net_de2_1 = conv_block(
                    net_de1_1,
                    filters=params.graph_params.LAYER_2,
                    prev_filters=params.graph_params.LAYER_2,
                    kernel=params.graph_params.KERNEL1,
                    activation=tf.nn.leaky_relu, phase=phase,
                    dil_rate=[1, 1, 1], tag='udec_conv_block_2')

                # net_bridge2 = tf.concat([net_de2_1, net_e2_1], axis=-1)
                net_de3_1 = conv_block(
                    net_de2_1, params.graph_params.LAYER_3,
                    params.graph_params.LAYER_3, params.graph_params.KERNEL2,
                    tf.nn.leaky_relu, phase, [1, 1, 1],
                    tag='udec_conv_block_3')

                # net_bridge3 = tf.concat([net_de3_1, net_e3_1], axis=-1)
                ## middle layer
                # net_cA = tf.concat([net_de3_1, net_m1_1], axis=-1)
                net_dm1_1 = middle_block(
                    net_de3_1, params.graph_params.LAYER_4,
                    params.graph_params.LAYER_3, params.graph_params.KERNEL2,
                    tf.nn.leaky_relu, phase, tag='udec_conv_middle_block_4')

                # net_dc3_1 = tf.concat([net_dm1_1, net_de3_1], axis=-1)  # net_e3_1, net_m1_1
                net_dd3_1 = deconv_block(
                    net_dm1_1, params.graph_params.LAYER_3,
                    params.graph_params.LAYER_2, params.graph_params.KERNEL2,
                    tf.nn.leaky_relu, phase, [1, 1, 1],
                    tag='udec_dconv_block_5')

                # net_dc2_1 = tf.concat([net_dd3_1, net_de2_1], axis=-1)  # net_e2_1, net_d3_1
                net_dd2_1 = deconv_block(
                    net_dd3_1, params.graph_params.LAYER_2,
                    params.graph_params.LAYER_1, params.graph_params.KERNEL1,
                    tf.nn.leaky_relu, phase, [1, 1, 1],
                    tag='udec_dconv_block_6')

                # net_dc1_1 = tf.concat([net_dd2_1, net_de1_1], axis=-1)  # net_e1_1, net_d2_1
                net_dd1_1 = deconv_block(
                    net_dd2_1, params.graph_params.LAYER_1,
                    params.graph_params.LAYER_1, params.graph_params.KERNEL1,
                    tf.nn.leaky_relu, phase, [1, 1, 1],
                    tag='udec_dconv_block_7')
                # net_dd1_1 = tf.layers.batch_normalization(net_dd1_1, center=True,
                #     scale=True, training=phase, momentum=0.90)

                # final layer for second U
                # net_t = tf.concat([net_dd1_1, net_feed], axis=-1)
                net_r = tf.layers.conv3d(
                    net_dd1_1, n_input_variables, params.graph_params.KERNEL2,
                    dilation_rate=[1, 1, 1], strides=[1, 1, 1], padding='SAME',
                    activation=tf.nn.leaky_relu,
                    kernel_initializer=keras.initializers.he_normal(),
                    name=name_tag('conv3d_final_layer', 'udec_conv_block_7'))

            return net_feed, net_r

        # Network
        total_out = wnet(self.input_processed,
                         params.image_params.CROP_PAD_IMAGE_Z,
                         params.image_params.CROP_PAD_IMAGE_Y,
                         params.image_params.CROP_PAD_IMAGE_X,
                         self.phase, self.keep_prob, params,
                         self.n_input_variables)
        print(total_out[0].get_shape())
        print(total_out[1].get_shape())

        wnet_categories = total_out[0]
        predictionsOut_1 = tf.argmax(wnet_categories, axis=4)
        predictionsOut_1 = tf.expand_dims(predictionsOut_1, axis=4)
        predictionsOut_1 = tf.reshape(
            predictionsOut_1,
            shape=(-1, params.image_params.CROP_PAD_IMAGE_X,
                   params.image_params.CROP_PAD_IMAGE_Y,
                   params.image_params.CROP_PAD_IMAGE_Z, 1))
        # @TODO is this how depthwise separable conv is being handled?
        predictionsOut_1 = tf.transpose(predictionsOut_1, perm=[0, 3, 2, 1, 4])

        # @TODO Parameterize these
        predictionsOut_1 = tf_crop_or_pad_along_axis(predictionsOut_1, self.org_z, 1)
        predictionsOut_1 = tf_crop_or_pad_along_axis(predictionsOut_1, self.org_y, 2)
        predictionsOut_1 = tf_crop_or_pad_along_axis(predictionsOut_1, self.org_x, 3)
        # predictionsOut_1 = tf.transpose(predictionsOut_1, perm=[0, 3, 2, 1, 4])
        predictionsOut_1 = tf.reshape(predictionsOut_1,
                                      shape=(-1, self.org_z, self.org_y, self.org_x))
        predictionsOut = tf.reshape(
            tf.cast(predictionsOut_1, tf.int64),
            shape=(-1, self.org_x * self.org_y * self.org_z),
            name='outputs')
        probs = tf.identity(wnet_categories, name='probs')
        # probs = tf.nn.softmax(wnet_categories, axis=2, name='probs')

        ## decoder - output
        wnet_original = tf.identity(total_out[1], name='decoder_outputs')
        original_image = tf.identity(self.input_processed, name='encoder_inputs')
        unprocessed_image = tf.identity(self.input_images, name='process_inputs')

        loss_dec = tf.losses.mean_squared_error(self.input_processed, wnet_original)
        print(loss_dec.get_shape())

        loss_enc = tf.map_fn(
            elems=np.arange(params.runtime_params.BATCH_SIZE),
            fn=lambda i: centroids_similarity_loss(
                self.input_processed[i, :, :, :, :],
                wnet_categories[i, :, :, :, :],
                params.image_params.CROP_PAD_IMAGE_Z,
                params.image_params.CROP_PAD_IMAGE_Y,
                params.image_params.CROP_PAD_IMAGE_X,
                params.graph_params.N_CLASSES,
                self.n_input_variables),
            dtype=tf.float32)
        print(loss_enc.get_shape())

        self.loss_decf = tf.reduce_sum(loss_dec)  # + tf.reduce_sum(loss_enc)
        self.loss_encf = tf.reduce_sum(loss_enc)

        # Training operations
        learning_rate = tf.train.cosine_decay_restarts(
            learning_rate=0.0001, global_step=global_step,
            first_decay_steps=10, t_mul=2.0, m_mul=1.0, alpha=0.01)
        trainer = tf.train.AdamOptimizer(learning_rate)

        self.saver = tf.train.Saver(max_to_keep=1000)
        self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(self.update_ops):
            # self.training_step_add = trainer.minimize(self.loss_misc)
            self.training_step_enc = trainer.minimize(self.loss_encf)
            self.training_step_dec = trainer.minimize(self.loss_decf)
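# Hedged sketch (assumption): the @TODOs above ask whether conv_block really
# is the depthwise separable convolution that the W-Net paper calls for.
# tf.layers has no 3-D separable conv, but one can be emulated with a
# per-channel spatial conv followed by a 1x1x1 pointwise projection:
def separable_conv3d_sketch(inputs, filters, kernel, activation):
    channels = inputs.get_shape()[-1].value
    # Depthwise step: spatial convolution applied to each channel on its own.
    depthwise = tf.concat(
        [tf.layers.conv3d(inputs[..., c:c + 1], 1, kernel, padding='SAME')
         for c in range(channels)], axis=-1)
    # Pointwise step: 1x1x1 conv mixes channels into the new channel space.
    return tf.layers.conv3d(depthwise, filters, [1, 1, 1],
                            padding='SAME', activation=activation)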
(X_train_noisy, y_train), (X_test_noisy,
 y_test) = setup_finetuning(baseline, classes, dataset, noise,
                            mean=0.0, std=15.0)
print('Loaded baseline!')

# Validation splitting
(X_train_noisy, y_train), (X_valid_noisy, y_valid) = dataset_split(
    X_train_noisy, y_train, return_data='samples')

# Image pre-processing: scale pixel values, then standardize with the
# training-set mean and std
X_train_noisy_sc, X_mean, X_std = image_preprocessing(X_train_noisy,
                                                      scale_only=False)
X_valid_noisy_sc, _, _ = image_preprocessing(X_valid_noisy, seq_mean=X_mean,
                                             seq_std=X_std, scale_only=False)
X_test_noisy_sc, _, _ = image_preprocessing(X_test_noisy, seq_mean=X_mean,
                                            seq_std=X_std, scale_only=False)

# Dataloaders
train_noisy_dl = get_data_loader(X_train_noisy_sc, y_train, shuffle=True)
valid_noisy_dl = get_data_loader(X_valid_noisy_sc, y_valid, shuffle=True)
test_noisy_dl = get_data_loader(X_test_noisy_sc, y_test, shuffle=True)

# Writer
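# Hedged sketch (assumption): image_preprocessing is defined elsewhere; its
# call signature above suggests scaling to [0, 1] plus optional
# standardization with stored training-set statistics, roughly like this:
import numpy as np

def image_preprocessing_sketch(X, seq_mean=None, seq_std=None, scale_only=True):
    X = X.astype('float32') / 255.0          # scale raw pixel values
    if scale_only:
        return X, None, None
    if seq_mean is None or seq_std is None:  # fit statistics on training data
        seq_mean, seq_std = X.mean(), X.std()
    return (X - seq_mean) / seq_std, seq_mean, seq_std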