neg += negtm
pickle.dump([neg, pos], open('negpos.pkl', 'wb'))
"""
# The block above (inside the triple-quoted string) generated neg/pos and pickled
# them; it is disabled here, and the samples are loaded from negpos.pkl instead.
neg, pos = pickle.load(open('negpos.pkl', 'rb'))
#neg = create_neg(input_shape)[None, :, :, :]
#pos = create_pos(input_shape)[None, :, :, :]

inputs = tf.placeholder(tf.float32,
                        shape=(batch_size, input_shape[0], input_shape[1], 1))

# Ask the network builder for modification hooks on conv1..conv3.
modify = []
for i in range(1, 4):
    modify.append('conv%d' % i)
logits, net, activations, modifys = sc(inputs, modify=modify)
print modifys

# Start with identity (all-ones) masks for every hooked layer.
modifyv = {}
for i in range(1, 4):
    name = 'conv%d' % i
    print name
    modifyv[name] = np.ones(activations[name].shape)

with tf.Session() as sess:
    saver = tf.train.Saver()
    #saver.restore(sess, 'ckpts5/39900.ckpt')
    #saver.restore(sess, 'ckpts_manual_noise_gauss/1.ckpt')
    #saver.restore(sess, 'ckpts_manual/1.ckpt')
    saver.restore(sess, 'ckpts_manual_noise_gauss2/1.ckpt')

    # Feed dict: positive sample plus the per-layer modification masks.
    fd = {modifys['conv%d' % i]: modifyv['conv%d' % i] for i in range(1, 4)}
    fdpos = {inputs: pos}
    fdpos.update(fd)
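    # --- Assumed continuation (not in the original script): a minimal sketch of
    # --- how fdpos and the conv masks might be consumed. The names out_pos and
    # --- mask are hypothetical.
    out_pos = sess.run(logits, feed_dict=fdpos)   # logits with identity masks
    print out_pos

    # Hypothetical ablation: zero channel 0 of the conv1 mask and rerun.
    mask = modifyv['conv1'].copy()
    mask[..., 0] = 0.0
    fdpos[modifys['conv1']] = mask
    print sess.run(logits, feed_dict=fdpos)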
import os
from collections import OrderedDict
from itertools import product

import tensorflow as tf

from sc_manual import sc

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

from sample_maker import create_pos, create_neg

# Create mini-batch for demo
input_shape = (8, 8)
batch_size = 1
ch = 3

gradients = OrderedDict()
activations = OrderedDict()

inputs = tf.placeholder(tf.float32,
                        shape=(batch_size, input_shape[0], input_shape[1], 1))
logits, net, activations = sc(inputs)

with tf.Session() as sess:
    saver = tf.train.Saver()
    #saver.restore(sess, 'ckpts_manual_noise/1.ckpt')
    #saver.restore(sess, 'ckpts_manual_noise_0mean/1.ckpt')
    saver.restore(sess, 'ckpts_manual_noise_gauss2/1.ckpt')

    # Dump every trained variable: one 2-D kernel slice per (input ch, output ch)
    # pair for the weights, and one scalar per output channel for the biases.
    for tv in tf.trainable_variables():
        tvv = sess.run(tv)
        if tv.name.find('weights') > -1:
            _, _, bchs, tchs = tvv.shape
            for bch, tch in product(range(bchs), range(tchs)):
                print tv.name, bch, tch
                print tvv[:, :, bch, tch]
        if tv.name.find('biases') > -1:
            tchs = tvv.shape[0]
            for tch in range(tchs):
                # Loop body reconstructed to mirror the weights dump above.
                print tv.name, tch
                print tvv[tch]