Example #1
def feed(batch, cnoise):
    sess = ct.get_session()
    res = sess.run([train_step, loss], feed_dict={
        x: batch,
        code_noise: cnoise,
    })
    return res[1]  # loss
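A minimal sketch of how a closure like this is usually driven; the sample_minibatch() helper and the 0.05 noise level are assumptions for illustration, not part of the snippet:

for i in range(1000):
    batch = sample_minibatch()   # hypothetical batch sampler
    l = feed(batch, 0.05)        # one optimizer step; returns the scalar loss
    if i % 100 == 0:
        print('iter', i, 'loss', l)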
Example #2
    def __init__(
            self,
            action_space,
            discount_factor=.99,  # gamma
    ):
        self.rpm = rpm(100000)  # 100k history
        self.plotter = plotter(num_lines=2)
        self.render = True
        self.training = True

        num_of_actions = 8
        self.outputdims = num_of_actions
        self.discount_factor = discount_factor

        ids, ods = None, num_of_actions

        self.actor = self.create_actor_network(ids, ods)
        self.critic = self.create_critic_network(ids, ods)
        self.actor_target = self.create_actor_network(ids, ods)
        self.critic_target = self.create_critic_network(ids, ods)

        self.feed, self.joint_inference, sync_target = self.train_step_gen()

        sess = ct.get_session()
        sess.run(tf.global_variables_initializer())

        sync_target()
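The sync_target() call at the end hard-copies the online networks into the target networks; later examples (#13, #15) run the same shift ops with tau=1e-3 for soft updates. A standalone NumPy-style sketch of the update rule those ops appear to implement, not the library's actual code:

def soft_update(target_weights, online_weights, tau):
    # theta_target <- tau * theta_online + (1 - tau) * theta_target
    # tau = 1.0 reproduces the hard copy performed by sync_target()
    return [tau * w + (1.0 - tau) * wt
            for w, wt in zip(online_weights, target_weights)]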
Example #3
def test(batch, quanth):
    sess = ct.get_session()
    res = sess.run([binary_code_test, y_test, binary_code, y, x], feed_dict={
        x: batch,
        quantization_threshold: quanth,
    })
    return res
Example #4
def feed(lr=.01):
    nonlocal white_loss, descent_step, learning_rate
    sess = ct.get_session()
    res = sess.run([descent_step, white_loss],
                   feed_dict={learning_rate: lr})
    loss = res[1]
    return loss
Example #5
def replace_original():
    sess = ct.get_session()
    rg = guangzhou.view()
    rg.shape = (1, ) + guangzhou.shape
    v = tf.Variable(rg)
    sess.run(tf.variables_initializer([v]))
    sess.run(tf.assign(white_noise_image, v))
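Note that this version creates a fresh tf.Variable and a new tf.assign op on every call, so the graph keeps growing. A leaner sketch under the same globals (assign_ph is a hypothetical placeholder added here) builds the assign op once and feeds the array in:

assign_ph = tf.placeholder(tf.float32, shape=white_noise_image.get_shape())
assign_op = tf.assign(white_noise_image, assign_ph)

def replace_original_fast(rg):
    # rg: NumPy array already reshaped to (1,) + guangzhou.shape
    ct.get_session().run(assign_op, feed_dict={assign_ph: rg})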
Example #6
def feed(xi, yi, train=True):
    sess = ct.get_session()
    if train:
        res = sess.run([loss, acc, train_step], feed_dict={x: xi, gt: yi})
    else:
        res = sess.run([loss, acc], feed_dict={x: xi, gt: yi})
    return res[0:2]  # [loss, acc]
Example #7
def apply_vgg(tensor):
    print('importing VGG16...')
    from keras.applications.vgg16 import VGG16
    from keras import backend as K
    K.set_session(ct.get_session()) # make sure we are in the same universe

    vgginst = VGG16(include_top=False, weights='imagenet', input_tensor=tensor)
    return vgginst.get_layer('block1_conv2').output
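A hedged usage sketch for apply_vgg; the ct.ph placeholder helper is borrowed from the other examples here, and VGG16 with include_top=False accepts variable spatial sizes:

x = ct.ph([None, None, 3])   # NHWC placeholder, batch dim implied
features = apply_vgg(x)      # block1_conv2 activations, 64 channels
print(features.get_shape())  # -> (?, ?, ?, 64)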
Example #8
    def __init__(self,
                 observation_space_dims,
                 discount_factor,
                 nb_actions=19,
                 rpm_size=1500000,
                 train_mult=10):

        self.training = True
        self.discount_factor = discount_factor
        # self.noise_source = one_fsq_noise()
        # self.train_counter = 0
        self.train_multiplier = train_mult

        self.rpm = rpm(rpm_size)

        # Deal only with the continuous space for now...
        self.inputdims = observation_space_dims
        self.outputdims = nb_actions

        def clamper(actions):
            return np.clip(actions, a_max=1.0, a_min=0.0)

        self.clamper = clamper

        ids, ods = self.inputdims, self.outputdims

        # with tf.device('/device:GPU:0'):

        self.actor = self.create_actor_network(ids, ods)
        self.critic = self.create_critic_network(ids, ods)
        self.actor_target = self.create_actor_network(ids, ods)
        self.critic_target = self.create_critic_network(ids, ods)

        self.feed, self.joint_inference, sync_target = self.train_step_gen()

        sess = ct.get_session()
        sess.run(tf.global_variables_initializer())

        sync_target()

        import threading as th
        self.lock = th.Lock()

        self.reward_plotter = plt.figure()
        self.reward_collector = []
        self.learn_reward_collector = []

        self.phased_noise_anneal_duration = 100
Example #9
def feed(xin, yin, ilr):
    sess = ct.get_session()
    res = sess.run([train_step, loss],
                   feed_dict={
                       x: xin,
                       gt: yin,
                       lr: ilr
                   })
    return res[1]  # loss
Example #10
def predict(st, i):
    # stateful, to enable fast generation.
    sess = ct.get_session()
    res = sess.run([stateful_y, ending_state],
                   feed_dict={
                       input_text: i,
                       starting_state: st
                   })
    return res
Example #11
def stateful_predict(st, i):
    sess = ct.get_session()
    if st is None:  # if we don't have a starting_state for the RNN
        res = sess.run([stateful_y_init, ending_state_init],
                       feed_dict={input_text: i})
    else:
        res = sess.run([stateful_y, ending_state],
                       feed_dict={input_text: i, starting_state: st})
    return res
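A sketch of chaining these helpers for generation, one timestep per call; the seed token and feeding the prediction back in are assumptions (Example #29 below drives the same pattern with real data):

state = None                 # first call takes the _init branch
token = seed_token           # hypothetical [1, 1, ...] input slice
for _ in range(100):
    out, state = stateful_predict(state, token)
    token = out              # feed the prediction back in (assumption)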
Example #12
        def joint_inference(state):
            #print('joint inference')
            sess = ct.get_session()
            #print('got session')
            #print('pg state  359:', state)
            res = sess.run([a_infer, q_infer], feed_dict={s1: state})

            #print('ran')
            return res
Example #13
def feed(memory):
    [s1d, a1d, r1d, isdoned, s2d] = memory  # d suffix means data
    sess = ct.get_session()
    res = sess.run(
        [critic_loss, actor_loss, cstep, astep, shift1, shift2],
        feed_dict={
            s1: s1d,
            a1: a1d,
            r1: r1d,
            isdone: isdoned,
            s2: s2d,
            tau: 1e-3
        })
Example #14
    def stateful_predict(st, i):
        # stateful, to enable fast generation.
        sess = ct.get_session()

        if st is None:  # if the starting state doesn't exist yet
            res = sess.run([y, _ending_state], feed_dict={x: i})
        else:
            res = sess.run([stateful_y, ending_state],
                           feed_dict={
                               x: i,
                               starting_state: st
                           })
        return res
Example #15
        def feed(memory):
            [s1d, a1d, r1d, isdoned, s2d] = memory  # d suffix means data
            sess = ct.get_session()
            res = sess.run([critic_loss, actor_loss,
                            cstep, astep, shift1, shift2],
                           feed_dict={
                               s1: s1d, a1: a1d, r1: r1d,
                               isdone: isdoned, s2: s2d, tau: 1e-3
                           })

            # debug purposes
            self.feedcounter += 1
            if self.feedcounter % 10 == 0:
                print(' ' * 30, 'closs: {:6.4f} aloss: {:6.4f}'.format(
                    res[0], res[1]), end='\r')
Example #16
def show(save=False):
    sess = ct.get_session()
    res = sess.run(vggmodel_d.input)
    image = res[0]
    image += 0.5
    cv2.imshow('result', image)
    cv2.waitKey(1)
    cv2.waitKey(1)  # repeated on purpose, to give HighGUI time to redraw
    if save:
        global show_counter, show_prefix
        cv2.imwrite('./log/' + show_prefix + '_' + str(show_counter) + '.jpg',
                    image * 255.)
        show_counter += 1
    return image
Example #17
def feed(batch, cnoise, init=False):
    sess = ct.get_session()
    if init:
        res = sess.run([train_step_init, loss_init], feed_dict={
            x: batch,
            code_noise: cnoise,
        })
    else:
        res = sess.run([train_step, loss], feed_dict={
            x: batch,
            code_noise: cnoise,
        })

    return res[1]
Example #18
def r(ep=10000, lr=1e-4):
    sess = ct.get_session()

    np.random.shuffle(xt)
    shuffled_cifar = xt
    length = len(shuffled_cifar)

    for i in range(ep):
        global noise_level
        noise_level *= 0.999
        print('---------------------------')
        print('iter', i, 'noise', noise_level)

        # sample from cifar
        j = i % int(length / batch_size)
        minibatch = shuffled_cifar[j * batch_size:(j + 1) * batch_size]

        # train for one step
        losses = gan_feed(sess, minibatch, noise_level, lr)
        print('dloss:{:6.4f} gloss:{:6.4f}'.format(losses[0], losses[1]))

        if i == ep - 1 or i % 20 == 0: show()
Example #19
def gen():

    x = ph([None, None, 1])  # grayscale
    gt = ph([None, None, 3])  # UVA (UV channels + importance/alpha)

    y = model(x - 0.5)

    sigmred = tf.nn.sigmoid(y[:, :, :, 2:3])

    importance = gt[:, :, :, 2:3]
    # ratios = tf.reduce_mean(importance,axis=[1,2,3],keep_dims=True)
    # scaled_importance = importance/ratios

    sqrdiff = (y[:, :, :, 0:2] - gt[:, :, :, 0:2])**2

    #loss = tf.reduce_mean(sqrdiff * importance) + \
    #   ct.mean_sigmoid_cross_entropy_loss(y[:,:,:,2:3],gt[:,:,:,2:3]) * 0.01
    rmsloss = tf.reduce_mean(sqrdiff * importance)
    celoss = ct.mean_sigmoid_cross_entropy_loss(y[:, :, :, 2:3], importance)
    loss = celoss + rmsloss

    opt = tf.train.AdamOptimizer(1e-3)

    train_step = opt.minimize(loss, var_list=model.get_weights())

    sess = ct.get_session()

    def feed(qr, uv):
        res = sess.run([train_step, loss], feed_dict={x: qr, gt: uv})
        return res[1]

    def test(qr):
        res = sess.run([y, sigmred], feed_dict={x: qr})
        return res

    return feed, test
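A hedged sketch of wiring gen() up; the initializer call and the qr_batch/uv_batch arrays are assumptions:

feed, test = gen()
ct.get_session().run(tf.global_variables_initializer())

l = feed(qr_batch, uv_batch)    # one Adam step; returns the combined loss
y_pred, alpha = test(qr_batch)  # raw output plus the sigmoided importance channel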
Example #20
def joint_inference(state):
    sess = ct.get_session()
    res = sess.run([a_infer, q_infer], feed_dict={s1: state})
    return res
Example #21
    def __init__(
        self,
        observation_space_dims,
        action_space,
        stack_factor=1,
        discount_factor=.99,  # gamma
        # train_skip_every=1,
        train_multiplier=1,
    ):
        self.rpm = rpm(1000000)  # 1M history
        self.plotter = plotter(num_lines=3)
        self.render = True
        self.training = True
        self.noise_source = one_fsq_noise()
        self.train_counter = 0
        # self.train_skip_every = train_skip_every
        self.train_multiplier = train_multiplier
        self.observation_stack_factor = stack_factor

        self.inputdims = observation_space_dims * self.observation_stack_factor
        # assume observation_space is continuous

        self.is_continuous = isinstance(action_space, Box)

        if self.is_continuous:  # if action space is continuous

            low = action_space.low
            high = action_space.high

            num_of_actions = action_space.shape[0]

            self.action_bias = high / 2. + low / 2.
            self.action_multiplier = high - self.action_bias

            # say high,low -> [2,7], then bias -> 4.5
            # mult = 2.5. then [-1,1] multiplies 2.5 + bias 4.5 -> [2,7]

            def clamper(actions):
                return np.clip(actions,
                               a_max=action_space.high,
                               a_min=action_space.low)

            self.clamper = clamper
        else:
            num_of_actions = action_space.n

            self.action_bias = .5
            self.action_multiplier = .5  # map (-1,1) into (0,1)

            def clamper(actions):
                return np.clip(actions, a_max=1., a_min=0.)

            self.clamper = clamper

        self.outputdims = num_of_actions
        self.discount_factor = discount_factor
        ids, ods = self.inputdims, self.outputdims
        print('inputdims:{}, outputdims:{}'.format(ids, ods))

        self.actor = self.create_actor_network(ids, ods)
        self.critic = self.create_critic_network(ids, ods)
        self.actor_target = self.create_actor_network(ids, ods)
        self.critic_target = self.create_critic_network(ids, ods)

        # print(self.actor.get_weights())
        # print(self.critic.get_weights())

        self.feed, self.joint_inference, sync_target = self.train_step_gen()

        sess = ct.get_session()
        sess.run(tf.global_variables_initializer())

        sync_target()

        import threading as th
        self.lock = th.Lock()

        if not hasattr(self, 'wavegraph'):
            num_waves = self.outputdims * 2 + 1

            def rn():
                r = np.random.uniform()
                return 0.2 + r * 0.4

            colors = []
            for i in range(num_waves - 1):
                color = [rn(), rn(), rn()]
                colors.append(color)
            colors.append([0.2, 0.5, 0.9])
            self.wavegraph = wavegraph(num_waves, 'actions/noises/Q',
                                       np.array(colors))
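The bias/multiplier comment above (high, low -> [2, 7] gives bias 4.5, mult 2.5) can be checked in isolation with plain NumPy:

import numpy as np

low, high = 2.0, 7.0
bias = high / 2. + low / 2.        # 4.5
mult = high - bias                 # 2.5
a = np.array([-1.0, 0.0, 1.0])     # raw actor outputs in [-1, 1]
print(a * mult + bias)             # [2.  4.5 7. ] -- back in [low, high]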
Example #22
    def sync_target():
        sess = ct.get_session()
        sess.run([shift1, shift2], feed_dict={tau: 1.})

    return feed, joint_inference, sync_target
Example #23
        self.clamper = clamper

        ids, ods = self.inputdims, self.outputdims

        self.actor = self.create_actor_network(ids, ods)
        self.critic = self.create_critic_network(ids, ods)
        self.actor_target = self.create_actor_network(ids, ods)
        self.critic_target = self.create_critic_network(ids, ods)

        self.feed, self.joint_inference, sync_target = self.train_step_gen()

        sess = ct.get_session()
        sess.run(tf.global_variables_initializer())

        sync_target()

        import threading as th
        self.lock = th.Lock()

        # self.reward_plotter = plt.figure()
        self.reward_collector = []
        self.learn_reward_collector = []

        self.phased_noise_anneal_duration = 100

    # a = actor(s) : predict actions given state
    def create_actor_network(self, inputdims, outputdims):
Example #24
def feed_gen(output_size=[512, 512]):
    # all the logic
    ct.set_session(
        K.get_session()
    )  # Keras loads model variables into its own session, so we have to use it

    def into_variable(value):
        v = tf.Variable(initial_value=value)
        sess = ct.get_session()
        sess.run([tf.variables_initializer([v])])
        return v

    print('output size chosen:', output_size)

    # create white_noise_image
    global white_noise_image
    white_noise_image = into_variable(
        tf.random_normal([1] + output_size + [3], stddev=1e-3))
    print('white_noise_image initialized.')

    # the model to descent the white noise image
    global vggmodel_d, vggmodel_e
    vggmodel_d = VGG19(include_top=False,
                       weights='imagenet',
                       input_tensor=white_noise_image)
    vggmodel_d.summary()

    reference_image = ct.ph([None, None, 3])
    # the model to extract style representations
    vggmodel_e = VGG19(include_top=False,
                       weights='imagenet',
                       input_tensor=reference_image)
    #vggmodel_e.summary()

    print('VGG models created.')

    def get_representations(vggmodel):

        # activations of each layer, 5 layers for style capture, 1 layer for content capture.
        layer_for_styles = list(
            filter(lambda x: 'conv1' in x.name or 'block5_conv3' in x.name,
                   vggmodel.layers))
        style_activations = [i.output for i in layer_for_styles]
        layer_for_content = ['block5_conv2']
        content_activations = [
            vggmodel.get_layer(l).output for l in layer_for_content
        ]

        def gram_4d(i):
            # calculate gram matrix (inner product) of feature maps.
            # where gram[n1, n2] is the correlation between two out of n features.

            # for example two feature map are each sensitive to tree and flower,
            # then gram[tree, flower] tells you how tree and flower are
            # correlated in the layer activations.
            # in other words, how likely tree and flower will appear together.

            # this correlation does not depend on position in image,
            # and that's why we can calculate style loss globally.
            # in other words, we don't care about the exact position of features,
            # but how likely each of them appear with another.

            # assume input is 4d tensor of shape [1, h, w, f]
            s = tf.shape(i)

            # reshape into feature matrix of shape [h*w, f]
            fm = tf.reshape(i, [s[1] * s[2], s[3]])

            # inner product
            gram = tf.matmul(tf.transpose(fm), fm)  # [f, f]

            # because h*w*f elements are included in computing the inner product,
            # we have to normalize the result:
            gram = gram / tf.cast((s[1] * s[2] * s[3]) * 2, tf.float32)
            return gram

        gram_matrices = [gram_4d(i) for i in style_activations]

        return gram_matrices, content_activations

    # get the gram matrices of the style reference image
    style_gram_matrices, content_activations = get_representations(vggmodel_e)

    # image shape manipulation: from HWC into NHWC
    sn = starry_night.view()
    sn.shape = (1, ) + sn.shape

    sess = ct.get_session()
    gram_ref = sess.run([style_gram_matrices], feed_dict={reference_image:
                                                          sn})[0]

    print('reference style gram matrices calculated.')

    # load style references into memory
    style_references = [into_variable(gr) for gr in gram_ref]

    print('reference style gram matrices loaded into memory as variables.')

    # get content representation of the content image
    gz = guangzhou.view()
    gz.shape = (1, ) + gz.shape

    reference_content_activations = sess.run([content_activations],
                                             feed_dict={reference_image:
                                                        gz})[0]

    print('reference content representations calculated.')

    # load content reps into memory
    reference_content_activations = [
        into_variable(rca) for rca in reference_content_activations
    ]
    print('reference content activations loaded into memory as variables.')

    # calculate losses of white_noise_image's style wrt style references:
    white_gram_matrices, white_content_activations = get_representations(
        vggmodel_d)

    def square_loss(g1, g2):
        # difference between two gram matrices, used as style loss
        return tf.reduce_sum((g1 - g2)**2)

    white_style_losses = [
        square_loss(white_gram_matrices[idx], style_references[idx])
        for idx, gs in enumerate(style_references)
    ]

    # calculate losses of white_noise_image's content wrt content reference:
    white_content_losses = [
        tf.reduce_mean((reference_content_activations[idx] -
                        white_content_activations[idx])**2)
        for idx, _ in enumerate(reference_content_activations)
    ]

    def amplitude_penalty(tensor, ceiling=0.499, floor=-0.499):
        # penalize pixel values that stray outside [floor, ceiling]
        p = tf.maximum(tensor - ceiling, 0) + tf.maximum(floor - tensor, 0)
        return p

    def proportional_loss(lis):
        # similar to reduce_mean, but adds a penalty if the losses are imbalanced.
        mean_loss = tf.reduce_mean(lis)
        pro_loss = tf.reduce_mean([abs(l - mean_loss) for l in lis])
        return mean_loss + pro_loss * 5

    white_amplitude_penalty = amplitude_penalty(white_noise_image)

    white_loss = proportional_loss([
        proportional_loss(white_style_losses),
        tf.reduce_mean(white_content_losses) * .01
    ])

    white_loss += tf.reduce_mean(white_amplitude_penalty**2) * 10000

    # minimize loss by gradient descent on white_noise_image
    learning_rate = tf.Variable(0.01)

    adam = tf.train.AdamOptimizer(learning_rate)
    print('connecting Adam optimizer...')
    descent_step = adam.minimize(white_loss, var_list=[white_noise_image])
    slots = [
        adam.get_slot(white_noise_image, name)
        for name in adam.get_slot_names()
    ]

    # initialize the white_noise_image and optimizer slots.
    sess.run([
        tf.variables_initializer([white_noise_image] + slots +
                                 list(adam._get_beta_accumulators()))
    ])

    def feed(lr=.01):
        nonlocal white_loss, descent_step, learning_rate
        sess = ct.get_session()
        res = sess.run([descent_step, white_loss],
                       feed_dict={learning_rate: lr})
        loss = res[1]
        return loss

    print('feed function generated.')
    return feed
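The gram_4d comments above describe a feature-correlation matrix; here is the same computation in plain NumPy for a single [1, h, w, f] activation map, as a sketch rather than the graph op used in feed_gen:

import numpy as np

def gram_4d_np(act):
    _, h, w, f = act.shape
    fm = act.reshape(h * w, f)         # feature matrix [h*w, f]
    gram = fm.T @ fm                   # inner product -> [f, f]
    return gram / (h * w * f * 2.0)    # same normalization as the TF version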
Example #25
def joint_inference(state):
    sess = ct.get_session()
    res = sess.run([a_infer, q_infer],
                   feed_dict={s1[k]: state[k]
                              for k in [0, 1]})
    return res
Example #26
        loss_values = res[1]
        return loss_values  #[dloss,gloss]

    return gan_feed


if __name__ == '__main__':
    print('loading cifar...')
    global xt, yt, xv, yv
    xt, yt, xv, yv = cifar()

    print('generating GAN...')
    gan_feed = gan(gm, dm)

    ct.get_session().run(tf.global_variables_initializer())
    print('Ready. enter r() to train, show() to test')

noise_level = .1


def r(ep=10000, lr=1e-4):
    sess = ct.get_session()

    np.random.shuffle(xt)
    shuffled_cifar = xt
    length = len(shuffled_cifar)

    for i in range(ep):
        global noise_level
        noise_level *= 0.999
Example #27
def clear():
    ct.get_session().run(tf.variables_initializer([white_noise_image]))
    print('white noise image cleared.')
Example #28
def sync_target():
    sess = ct.get_session()
    sess.run([shift1, shift2], feed_dict={tau: 1.})
Example #29
    xt, yt = needsamples(1)

    index = np.random.choice(len(xt))
    mbx = xt[index:index + 1]
    mby = yt[index:index + 1]

    gru_state = None
    resarr = []
    for i in range(len(mbx[0])):  # timesteps
        resy, state = stateful_predict(gru_state, mbx[0:1, i:i + 1])
        resarr.append(resy)  # [1,1,h,w,1]
        gru_state = state

    resarr = np.concatenate(resarr, axis=1)

    print(resarr.shape)

    vis.show_batch_autoscaled(mbx[0], name='input image')
    vis.show_batch_autoscaled(resarr[0], name='inference')
    vis.show_batch_autoscaled(mby[0], name='ground truth')


if __name__ == '__main__':
    # timg,tgt = load_dataset('drone_dataset_96x96')
    # tgtd = downsample(tgt)
    # tgtd = tgt

    feed, stateful_predict = trainer()
    ct.get_session().run(ct.gvi())  # global init
    print('ready. enter r() to train, show() to test.')
Example #30
def into_variable(value):
    v = tf.Variable(initial_value=value)
    sess = ct.get_session()
    sess.run([tf.variables_initializer([v])])
    return v
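Hedged usage of into_variable; the zeros array is an arbitrary example value:

import numpy as np
ref = into_variable(np.zeros([1, 64, 64, 3], dtype=np.float32))
# ref is now an initialized tf.Variable, usable in later sess.run calls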