# --- Data pipeline setup -----------------------------------------------------
# Builds train/validation loaders from CSV manifests under `root_files`, then
# declares the TF1-style graph inputs for a conditional GAN.
# NOTE(review): this chunk was recovered from a whitespace-mangled paste; the
# statement order below preserves the original token sequence exactly.
manifestfile = os.path.join(root_files, 'train-index.csv')
testmanifest = os.path.join(root_files, 'val-index.csv')
# `be`, `im_size`, `n_classes`, `batch_size`, `zdim` are defined elsewhere in
# the file — presumably a neon/aeon backend handle and hyperparameters; verify.
train = train_loader(manifestfile, root_files, be, h=im_size[0], w=im_size[1], scale=[0.875, 0.875])
test = validation_loader(testmanifest, root_files, be, h=im_size[0], w=im_size[1], scale=[0.875, 0.875], ncls=n_classes)
# NOTE(review): this rebinds the name `OneHot` from the class/factory to its
# instance, shadowing it — any later attempt to construct another OneHot will
# call the instance instead. Consider renaming the variable (e.g. `onehot`).
OneHot = OneHot(be, n_classes)

# Graph input
# TF1.x placeholders fed at session run time.
is_train = tf.placeholder(tf.bool)          # train/eval switch (e.g. for batch norm)
keep_prob = tf.placeholder(tf.float32)      # dropout keep probability
# Images in NCHW layout: (batch, 3, H, W).
x_n = tf.placeholder(tf.float32, [batch_size, 3, im_size[0], im_size[1]])
y = tf.placeholder(tf.float32, [batch_size, n_classes])    # one-hot labels
lr_tf = tf.placeholder(tf.float32)          # learning rate fed per step
# Latent noise for the generator, uniform in [-1, 1).
z = tf.random_uniform([batch_size, zdim], -1, 1)
# NOTE(review): a second label placeholder alongside `y` — purpose unclear
# from this view (possibly generator-conditioning labels); confirm at feed site.
iny = tf.placeholder(tf.float32, [batch_size, n_classes])

# Discriminator
# NOTE(review): this definition is truncated in the visible source — only the
# scope opening and a size marker comment ("# 64") survive; the body continues
# outside this chunk.
def discriminator(inp, reuse=False):
    with tf.variable_scope('Encoder', reuse=reuse):
        # 64
# --- NNPDA training (neural network pushdown automaton) ----------------------
# NOTE(review): recovered from a whitespace-mangled paste. The first section
# below is the *interior* of a method (it references `self`, `input_vector`,
# `act`, `sess` — its `def` line is outside this view), followed by the
# top-level driver script. Token order is preserved exactly.

# -- method-body fragment (enclosing def not visible) --
ninput = np.asarray(input_vector)
# Target vector of ones, one per state.
# NOTE(review): `states.siz()` looks like a typo for `size()` — confirm the
# Group API; as written this raises AttributeError unless `siz` really exists.
expect = [1.0 for x in range(states.siz())]
# Run one optimization step; fetches: network output, train op, loss, and the
# stack-fill tensor, feeding the current state, target, and stack fill level.
res = sess.run(
    [self.nn.out, self.nn.optimize, self.nn.error, self.nn.fill],
    {
        self.nn.state: [ninput],
        self.nn.expect: [expect],
        self.nn.fill: [[self.stack.fill()]]
    })
# Debug trace: loss, fill, action, output, and stack contents.
print(res[2], res[3], act, res[0], self.stack.debug())
self.stack.clear()  # reset the external stack between training sequences
#sess.run()

# -- top-level driver --
# NOTE(review): destructuring into `open` shadows the builtin open() for the
# rest of the module; consider renaming (e.g. `tok_open`).
[open, data, close, last] = oh.Item.items(['open', 'data', 'close', 'last'])
inputs = oh.Group(['open', 'data', 'close', 'last'])
states = oh.Group.array('state', 4)
# NNPDA(states, inputs, stack_depth?, learning_rate?) — argument meanings
# presumed from values (20, .01); verify against the NNPDA constructor.
nnpda = NNPDA(states, inputs, 20, .01)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
# NOTE(review): bare attribute access — a no-op expression statement. Either
# dead code or a missing call; remove or complete it.
tf.summary.SessionLog
# Three balanced bracket-like sequences of increasing length.
vec = [[open, data, close], [open, data, data, close], [open, data, data, data, close]]
for x in range(100):
    for y in range(4):
        # NOTE(review): `x % 1` is always 0, so only vec[0] is ever trained and
        # the inner loop variable `y` is unused — likely intended `vec[x % 3]`
        # (or `vec[y % 3]`); confirm intent before fixing.
        nnpda.train(vec[x % 1])