def _create_siamese(net_path, net_x, net_z):
    """Build the two weight-sharing branches of the Siamese network.

    Reads a matconvnet .mat checkpoint from `net_path` and stacks the conv /
    batchnorm / relu blocks onto the `net_x` (instance) and `net_z`
    (exemplar) placeholders.  The `net_z` branch is built with reuse=True so
    both branches share the same TF variables.

    Returns (net_z, net_x, params_names_list, params_values_list).
    """
    # read mat file from net_path and start TF Siamese graph from placeholders X and Z
    params_names_list, params_values_list = _import_from_matconvnet(net_path)

    def _value_of(param_name):
        # fetch a parameter array by its matconvnet name
        return params_values_list[params_names_list.index(param_name)]

    # loop through the flag arrays and re-construct the network layer by layer
    for layer in range(_num_layers):
        print('> Layer ' + str(layer + 1))

        # conv weights and biases
        conv_W_name = _find_params('conv' + str(layer + 1) + 'f', params_names_list)[0]
        conv_b_name = _find_params('conv' + str(layer + 1) + 'b', params_names_list)[0]
        print('Conv:setting %s %s' % (conv_W_name, conv_b_name))
        print('Conv : stride %s filter-group %s' % (str(_conv_stride[layer]), str(_filtergroup_yn[layer])))
        conv_W = _value_of(conv_W_name)
        conv_b = _value_of(conv_b_name)

        # batchnorm parameters, when this layer has them
        if _bnorm_yn[layer]:
            bn_beta_name = _find_params('bn' + str(layer + 1) + 'b', params_names_list)[0]
            bn_gamma_name = _find_params('bn' + str(layer + 1) + 'm', params_names_list)[0]
            bn_moments_name = _find_params('bn' + str(layer + 1) + 'x', params_names_list)[0]
            bn_beta = _value_of(bn_beta_name)
            bn_gamma = _value_of(bn_gamma_name)
            bn_moments = _value_of(bn_moments_name)
            bn_moving_mean = bn_moments[:, 0]
            bn_moving_variance = bn_moments[:, 1] ** 2  # saved as std in matconvnet
        else:
            bn_beta = bn_gamma = bn_moving_mean = bn_moving_variance = []

        # conv "block" with bnorm and activation; reuse=False creates the variables
        net_x = set_convolutional(net_x, conv_W, np.swapaxes(conv_b, 0, 1), _conv_stride[layer],
                                  bn_beta, bn_gamma, bn_moving_mean, bn_moving_variance,
                                  filtergroup=_filtergroup_yn[layer], batchnorm=_bnorm_yn[layer],
                                  activation=_relu_yn[layer],
                                  scope='conv' + str(layer + 1), reuse=False)
        # reuse=True shares the parameters between the two Siamese branches
        net_z = set_convolutional(net_z, conv_W, np.swapaxes(conv_b, 0, 1), _conv_stride[layer],
                                  bn_beta, bn_gamma, bn_moving_mean, bn_moving_variance,
                                  filtergroup=_filtergroup_yn[layer], batchnorm=_bnorm_yn[layer],
                                  activation=_relu_yn[layer],
                                  scope='conv' + str(layer + 1), reuse=True)

        # max pool if required
        # NOTE(review): this variant only logs the condition and applies no
        # pooling op — confirm whether that is intentional for this network.
        if _pool_stride[layer] > 0:
            print("_pool_stride")

    return net_z, net_x, params_names_list, params_values_list
def _create_siamese(net_path, net_x, net_z):
    """Build the two weight-sharing branches of the Siamese network.

    Reads a matconvnet .mat checkpoint from `net_path` and stacks the conv /
    batchnorm / relu (and optional max-pool) blocks onto the `net_x`
    (instance) and `net_z` (exemplar) placeholders.

    Returns (net_z, net_x, params_names_list, params_values_list).

    FIX: converted from Python 2 (`print` statements, `xrange`) to Python 3
    syntax — the previous form is a SyntaxError under Python 3 and would
    prevent the whole module from importing.
    """
    # read mat file from net_path and start TF Siamese graph from placeholders X and Z
    params_names_list, params_values_list = _import_from_matconvnet(net_path)
    # loop through the flag arrays and re-construct network, reading parameters of conv and bnorm layers
    for i in range(_num_layers):
        print('> Layer ' + str(i + 1))
        # conv
        conv_W_name = _find_params('conv' + str(i + 1) + 'f', params_names_list)[0]
        conv_b_name = _find_params('conv' + str(i + 1) + 'b', params_names_list)[0]
        print('\t\tCONV: setting ' + conv_W_name + ' ' + conv_b_name)
        print('\t\tCONV: stride ' + str(_conv_stride[i]) + ', filter-group ' + str(_filtergroup_yn[i]))
        conv_W = params_values_list[params_names_list.index(conv_W_name)]
        conv_b = params_values_list[params_names_list.index(conv_b_name)]
        # batchnorm
        if _bnorm_yn[i]:
            bn_beta_name = _find_params('bn' + str(i + 1) + 'b', params_names_list)[0]
            bn_gamma_name = _find_params('bn' + str(i + 1) + 'm', params_names_list)[0]
            bn_moments_name = _find_params('bn' + str(i + 1) + 'x', params_names_list)[0]
            print('\t\tBNORM: setting ' + bn_beta_name + ' ' + bn_gamma_name + ' ' + bn_moments_name)
            bn_beta = params_values_list[params_names_list.index(bn_beta_name)]
            bn_gamma = params_values_list[params_names_list.index(bn_gamma_name)]
            bn_moments = params_values_list[params_names_list.index(bn_moments_name)]
            bn_moving_mean = bn_moments[:, 0]
            bn_moving_variance = bn_moments[:, 1] ** 2  # saved as std in matconvnet
        else:
            bn_beta = bn_gamma = bn_moving_mean = bn_moving_variance = []
        # set up conv "block" with bnorm and activation
        net_x = set_convolutional(net_x, conv_W, np.swapaxes(conv_b, 0, 1), _conv_stride[i],
                                  bn_beta, bn_gamma, bn_moving_mean, bn_moving_variance,
                                  filtergroup=_filtergroup_yn[i], batchnorm=_bnorm_yn[i],
                                  activation=_relu_yn[i],
                                  scope='conv' + str(i + 1), reuse=False)
        # notice reuse=True for Siamese parameters sharing
        net_z = set_convolutional(net_z, conv_W, np.swapaxes(conv_b, 0, 1), _conv_stride[i],
                                  bn_beta, bn_gamma, bn_moving_mean, bn_moving_variance,
                                  filtergroup=_filtergroup_yn[i], batchnorm=_bnorm_yn[i],
                                  activation=_relu_yn[i],
                                  scope='conv' + str(i + 1), reuse=True)
        # TODO: Are we using max pooling? Remove if not.
        # add max pool if required
        if _pool_stride[i] > 0:
            print('\t\tMAX-POOL: size ' + str(_pool_sz) + ' and stride ' + str(_pool_stride[i]))
            net_x = tf.nn.max_pool(net_x, [1, _pool_sz, _pool_sz, 1],
                                   strides=[1, _pool_stride[i], _pool_stride[i], 1],
                                   padding='VALID', name='pool' + str(i + 1))
            net_z = tf.nn.max_pool(net_z, [1, _pool_sz, _pool_sz, 1],
                                   strides=[1, _pool_stride[i], _pool_stride[i], 1],
                                   padding='VALID', name='pool' + str(i + 1))
        print()
    return net_z, net_x, params_names_list, params_values_list
def _create_siamese(net_path, net_x, net_z, istrain):
    # Trainable variant of the Siamese builder: `istrain` is forwarded to
    # set_convolutional and batchnorm parameters are not passed explicitly
    # (presumably created as TF variables inside set_convolutional).
    #
    # NOTE(review): this variant dropped the matconvnet loading step but still
    # references conv_W, conv_b, params_names_list and params_values_list,
    # none of which are defined in this scope — calling it raises NameError.
    # Confirm whether the parameter-loading code should be restored (as in the
    # other _create_siamese variants) or the stale references removed.
    #
    # NOTE(review): `net_path` is accepted but never used here — verify intent.
    # loop through the flag arrays and re-construct network, reading parameters of conv and bnorm layers
    for i in range(_num_layers):
        print( '> Layer '+str(i+1))
        # set up conv "block" with bnorm and activation
        net_x = set_convolutional(net_x, conv_W, np.swapaxes(conv_b,0,1), _conv_stride[i], istrain=istrain, \
                filtergroup=_filtergroup_yn[i], batchnorm=_bnorm_yn[i], activation=_relu_yn[i], \
                scope='conv'+str(i+1), reuse=False)
        # notice reuse=True for Siamese parameters sharing
        net_z = set_convolutional(net_z, conv_W, np.swapaxes(conv_b,0,1), _conv_stride[i], istrain=istrain, \
                filtergroup=_filtergroup_yn[i], batchnorm=_bnorm_yn[i], activation=_relu_yn[i], \
                scope='conv'+str(i+1), reuse=True)
        # add max pool if required
        if _pool_stride[i]>0:
            print( '\t\tMAX-POOL: size '+str(_pool_sz)+ ' and stride '+str(_pool_stride[i]))
            net_x = tf.nn.max_pool(net_x, [1,_pool_sz,_pool_sz,1], strides=[1,_pool_stride[i],_pool_stride[i],1], padding='VALID', name='pool'+str(i+1))
            net_z = tf.nn.max_pool(net_z, [1,_pool_sz,_pool_sz,1], strides=[1,_pool_stride[i],_pool_stride[i],1], padding='VALID', name='pool'+str(i+1))
        print()
    return net_z, net_x, params_names_list, params_values_list
def create_net(net_x, net_z):
    """Build the Siamese network graph from scratch (no checkpoint).

    net_x is the instance (search) frame branch; net_z is the template
    (exemplar) frame branch.  Each layer is a conv block (batchnorm disabled,
    the four bn arguments are placeholder zeros) optionally followed by max
    pooling.  The two branches share parameters via TF variable scopes.

    Returns (net_z, net_x) after all layers.

    FIX: the first branch (net_x) previously passed reuse=True, but under TF1
    variable scoping the first call into a scope must create the variables
    (reuse=False); only the second branch reuses them.  This also matches the
    reuse=False / reuse=True pattern used by every _create_siamese variant in
    this file.
    """
    for i in range(_nums_layers):
        print('Layer ' + str(i + 1))
        # debug: tensors entering this layer
        print(net_z)
        print(net_x)
        # conv block; W shape is [k, k, in_c, out_c] and b length is out_c
        net_x = set_convolutional(
            net_x,
            [_conv_w_sz[i], _conv_w_sz[i], _conv_w_in_c[i], _conv_w_out[i]],
            _conv_w_out[i],  # the shape of W and b
            _conv_stride[i],
            0, 0, 0, 0,
            batchnorm=False,
            activation=_if_relu[i],
            reuse=False,  # first branch creates the variables
            scope='conv' + str(i + 1))
        net_z = set_convolutional(
            net_z,
            [_conv_w_sz[i], _conv_w_sz[i], _conv_w_in_c[i], _conv_w_out[i]],
            _conv_w_out[i],  # the shape of W and b
            _conv_stride[i],
            0, 0, 0, 0,
            batchnorm=False,
            activation=_if_relu[i],
            reuse=True,  # share parameters with the net_x branch
            scope='conv' + str(i + 1))
        print(net_z)
        print(net_x)
        print('Layer ' + str(i + 1) + ' conv end')
        # apply max pooling when this layer requests it
        if _pool_stride[i] > 0:
            print("_pool_stride")
            net_x = tf.nn.max_pool(
                net_x, [1, _pool_sz[i], _pool_sz[i], 1],
                strides=[1, _pool_stride[i], _pool_stride[i], 1],
                padding='VALID',
                name='pool' + str(i + 1))
            net_z = tf.nn.max_pool(
                net_z, [1, _pool_sz[i], _pool_sz[i], 1],
                strides=[1, _pool_stride[i], _pool_stride[i], 1],
                padding='VALID',
                name='pool' + str(i + 1))
        print(net_z)
        print(net_x)
        print('Layer ' + str(i + 1) + ' end')
    return net_z, net_x