def test_block_inversion():
    batch_size = 13
    height, width = 32, 32
    num_channel = 3
    block = InvertibleBlock(
        in_shape=(batch_size, height, width, num_channel),
        stride=2,
        num_channel=num_channel,  # number of channels in the intermediate layers
        coeff=0.97,
        power_iter=1,
        num_trace_samples=2,
        num_series_terms=2,
        activation=tf.nn.elu,
        use_sn=True,
        use_actnorm=False)

    x = tf.random.normal(shape=(batch_size, height, width, num_channel))

    out, trace = block(x)

    x_inverse = block.inverse(out)

    diff = tf.reduce_mean(tf.abs(x - x_inverse))

    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())

    _diff = sess.run(diff)
    print("Inversion difference is: {}".format(_diff))
Example #2
def process():
    tf.disable_eager_execution()
    sess = tf.InteractiveSession(graph=tf.Graph())

    ConvRT_model = hub.Module("../Sentence_Encoder/Embeddings/ConvRT/")

    USE_model = hub.load('../Sentence_Encoder/Embeddings/USE_QA/')

    sess.run(tf.tables_initializer())
    sess.run(tf.global_variables_initializer())

    with open("Processed_Scripts/Bot_Profile.pkl", "rb") as fp:
        bot_profile = pickle.load(fp)

    with open("Processed_Scripts/Chatterbot.pkl", "rb") as fp:
        chatterbot = pickle.load(fp)

    bot_queries = [k for k, v in bot_profile.items()]
    bot_contexts = ["" for k, v in bot_profile.items()]

    chatterbot_queries = [k for k, v in chatterbot.items()]
    chatterbot_contexts = ["" for k, v in chatterbot.items()]

    embedded_bot_queries = encode(sess, bot_queries, bot_contexts, USE_model,
                                  ConvRT_model)
    embedded_chatterbot_queries = encode(sess, chatterbot_queries,
                                         chatterbot_contexts, USE_model,
                                         ConvRT_model)

    with open("Processed_Scripts/embedded_bot_queries.pkl", "wb") as fp:
        pickle.dump(embedded_bot_queries, fp)

    with open("Processed_Scripts/embedded_chatterbot_queries.pkl", "wb") as fp:
        pickle.dump(embedded_chatterbot_queries, fp)
Example #3
def playGame():
    sess = tf.InteractiveSession()
    s, readout, h_fc1 = createNetwork("m1")

    s_2, readout_2, h_fc1_2 = createNetwork("m2")

    trainNetwork(s, readout, s_2, readout_2, sess)
Example #4
    def init_session(self, device_id=0):
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True  # allocate only as much GPU memory as needed
        config.gpu_options.visible_device_list = str(device_id)  # which GPU this session may use; depends on the number of GPUs

        print("Session and graph initialized.")
        self.sess = tf.InteractiveSession(config=config, graph=self)
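For reference, on TF 2.x the equivalent GPU setup is usually configured without a session or ConfigProto; a rough sketch under that assumption (not part of the original class), limited to a single visible GPU:

# Hedged TF 2.x sketch of allow_growth + visible_device_list.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
    tf.config.experimental.set_memory_growth(gpus[0], True)

Example #5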
 def test_build_graph(self, dataset_name, regression):
   """Test whether build_graph works as expected."""
   data_x, data_y, _ = data_utils.load_dataset(dataset_name)
   data_gen = data_utils.split_training_dataset(
       data_x, data_y, n_splits=5, stratified=not regression)
   (x_train, y_train), (x_validation, y_validation) = next(data_gen)
   sess = tf.InteractiveSession()
   graph_tensors_and_ops, metric_scores = graph_builder.build_graph(
       x_train=x_train,
       y_train=y_train,
       x_test=x_validation,
       y_test=y_validation,
       activation='exu',
       learning_rate=1e-3,
       batch_size=256,
       shallow=True,
       regression=regression,
       output_regularization=0.1,
       dropout=0.1,
       decay_rate=0.999,
       name_scope='model',
       l2_regularization=0.1)
   # Run initializer ops
   sess.run(tf.global_variables_initializer())
   sess.run([
       graph_tensors_and_ops['iterator_initializer'],
       graph_tensors_and_ops['running_vars_initializer']
   ])
   for _ in range(2):
     sess.run(graph_tensors_and_ops['train_op'])
   self.assertIsInstance(metric_scores['train'](sess), float)
   sess.close()
Example #6
def test_net():
    print(bcolors.G + "Task : test\n" + bcolors.END)
    student = lenet()
    pred = tf.nn.softmax(student)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
    sess = tf.InteractiveSession(config=tf.ConfigProto(
        gpu_options=gpu_options))
    init = tf.initialize_all_variables()
    sess.run(init)
    with tf.device('/gpu:0'):
        saver = tf.train.Saver()
        model_file = tf.train.latest_checkpoint(model)
        saver.restore(sess, model_file)

    mean = cal_mean()
    data, label = read_cifar10('test')
    total = 0
    correct = 0
    begin = time.time()
    for j in range(len(data) // batch_size):
        batch_x = data[j * batch_size:(j + 1) * batch_size] - mean
        prob = sess.run([pred],
                        feed_dict={
                            x: batch_x,
                            y: np.ones((batch_size)),
                            keep_prob: 1.0
                        })
        if np.argmax(prob[0]) == label[j]:
            correct += 1
        total += 1
        #print(("acc = %f . %d/%d"%(float(correct)/total, correct, total))
    end = time.time()
    print("acc = %f . %d/%d.  Computing time = %f seconds" %
          (float(correct) / total, correct, total, end - begin))
Example #7
def test_iresnet():

  np.random.seed(2019)

  batch_size = 2
  input_size = 8
  channels = 3

  input_shape = (batch_size, input_size, input_size, channels)
  x_np = np.random.rand(*input_shape).astype("float32")
  x = tf.placeholder(tf.float32, [None, input_size, input_size, channels])

  net = IResNet(
    in_shape=input_shape,
    block_list=[32, 32, 32], #[1, 2, 3],
    stride_list=[1, 2, 2],
    channel_list=[32, 32, 32],#[2, 3, 5],
    num_trace_samples=4,
    num_series_terms=3,
    coeff=0.97,
    power_iter=2)

  log_prob_z, trace, loss, z = net(x)
  a = net.inverse(z)

  sess = tf.InteractiveSession()
  sess.run(tf.global_variables_initializer())

  _log_prob_z, _trace, _loss, _z, _a = sess.run([log_prob_z, trace, loss, z, a], feed_dict={x: x_np})
  print("Loss is: {}".format(_loss))
  print("Trace is: {}".format(_trace))
  print("Log prob is: {}".format(_log_prob_z))
  print("z shape is: {}".format(z.shape))
  print("a shape is {}".format(a.shape))
Example #8
    def test_layer_with_activation_after_bn_different_activation_before_bn(
            self):
        """Tests the option to use activation before or after batchnorm."""
        tf.reset_default_graph()
        model_config = make_layerwise_model_config()
        act_fn = tf.ones_like
        layer_act_after = layerwise.ConvLayer(
            name='test_layer_activation_after',
            model_config=model_config,
            act_fn=act_fn,
            act_after_bn=True)
        layer_act_before = layerwise.ConvLayer(
            name='test_layer_activation_before',
            model_config=model_config,
            act_fn=act_fn,
            act_after_bn=False)
        images = tf.random.normal((100, 28, 28, 3))
        out_after = layer_act_after(images)
        out_before = layer_act_before(images)

        sess = tf.InteractiveSession()
        sess.run(tf.initializers.global_variables())
        self.assertAllEqual(
            out_after, tf.ones_like(out_after),
            'When evaluating layerwise.ConvLayer activation after '
            'BatchNorm was not computed properly.')
        self.assertAllEqual(
            out_before, tf.zeros_like(out_before),
            'When evaluating layerwise.ConvLayer activation before '
            'BatchNorm was not computed properly.')
Example #9
def save_all():
    import tensorflow as tf
    import tensorflow.compat.v1 as tf1
    import tqdm
    sess = tf1.InteractiveSession()
    image_ph = tf.compat.v1.placeholder(dtype=tf.uint8, shape=(None, None, 3))
    encode_op = tf.image.encode_png(image_ph)
    maketree('mnistimg/mnist')
    with open('mnistimg/mnist-validation.txt', 'w') as filenames:
        for i in tqdm.trange(len(test_images)):
            label = test_labels[i]
            image = test_images[i]
            path = "mnist/validation/{}/{}.png".format(label, i)
            filenames.write(path + '\n')
            filenames.flush()
            path = "mnistimg/" + path
            maketree(os.path.dirname(path))
            data = sess.run(encode_op, {image_ph: as_rgb(image)})
            with open(path, "wb") as f:
                f.write(data)
    with open('mnistimg/mnist-train.txt', 'w') as filenames:
        for i in tqdm.trange(len(train_images)):
            label = train_labels[i]
            image = train_images[i]
            path = "mnist/train/{}/{}.png".format(label, i)
            filenames.write(path + '\n')
            filenames.flush()
            path = "mnistimg/" + path
            maketree(os.path.dirname(path))
            data = sess.run(encode_op, {image_ph: as_rgb(image)})
            with open(path, "wb") as f:
                f.write(data)
Example #10
def main():
    tf.compat.v1.disable_eager_execution()
    # data = tf.keras.datasets.mnist
    # (x_train, y_train), (x_test, y_test) = data.load_data()

    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    # plt.imshow(x_train[1], cmap="binary")
    # plt.show()
    sess = tfc.InteractiveSession()
    x = tfc.placeholder("float", shape=[None, 784])
    y_ = tfc.placeholder("float", shape=[None, 10])
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    sess.run(tfc.initialize_all_variables())
    y = tf.nn.softmax(tf.matmul(x, W) + b)
    cross_entropy = -tf.reduce_sum(y_ * tfc.log(y))
    train_step = tfc.train.GradientDescentOptimizer(0.01).minimize(
        cross_entropy)
    for i in range(1000):
        batch = mnist.train.next_batch(50)
        train_step.run(feed_dict={x: batch[0], y_: batch[1]})
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    print(
        "测试集的正确率为",
        accuracy.eval(feed_dict={
            x: mnist.test.images,
            y_: mnist.test.labels
        }))
Example #11
 def __init__(self, observation_width, observation_height, action_space, model_file, log_file):
     # the state is the input vector of network, in this env, it has four dimensions
     self.state_dim = observation_width * observation_height
     self.state_w = observation_width
     self.state_h = observation_height
     # the action is the output vector and it has two dimensions
     self.action_dim = action_space
     # init experience replay, the deque is a list that first-in & first-out
     self.replay_buffer = deque()
     # you can create the network by the two parameters
     self.create_Q_network()
     # after create the network, we can define the training methods
     self.create_updating_method()
     # set the value in choose_action
     self.epsilon = INITIAL_EPSILON
     self.model_path = model_file + "/save_model.ckpt"
     self.model_file = model_file
     self.log_file = log_file
     # The saved model file names vary, so we can only check whether the directory exists
     # Init session
     self.session = tf.InteractiveSession()
     if os.path.exists(self.model_file):
         print("model exists , load model\n")
         self.saver = tf.train.Saver()
         self.saver.restore(self.session, self.model_path)
     else:
         print("model don't exists , create new one\n")
         self.session.run(tf.global_variables_initializer())
         self.saver = tf.train.Saver()
     # init
     # The graph must be written to a file before it can be viewed in TensorBoard
     self.writer = tf.summary.FileWriter(self.log_file, self.session.graph)
     # Avoid non-ASCII characters in the log path, otherwise TensorBoard cannot load it
     # tensorboard --logdir=logs_gpu --host=127.0.0.1
     self.merged = tf.summary.merge_all()
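A common variant of the load-or-initialize logic above checks for an actual checkpoint with tf.train.latest_checkpoint rather than testing the directory path; a minimal sketch under that assumption:

# Sketch: restore the newest checkpoint if one exists, otherwise start fresh.
# Assumes `model_file` is the checkpoint directory and `session` the live session.
saver = tf.train.Saver()
ckpt = tf.train.latest_checkpoint(model_file)
if ckpt is not None:
    saver.restore(session, ckpt)
else:
    session.run(tf.global_variables_initializer())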
Example #12
    def create_session(self, improve_by=5, min_epoch=10):
        self.objs['saver'] = tf.train.Saver()
        # self.objs['sess'] = tf.Session(config = self.session_config)
        self.objs['sess'] = tf.InteractiveSession()
        self.objs['sess'].run(tf.global_variables_initializer())
        # (Disabled) Early Stopping
        # self.objs['es'] = utils.EarlyStopping(
        #     self.objs['sess'],
        #     self.objs['saver'],
        #     save_dir = "saved_seed%d" % self.seed,
        #     improve_by = improve_by,
        #     min_epoch = min_epoch
        # )

        if self.feature_extractor_needed:
            if not os.path.exists("vgg16_cifar100"):
                print("Pretrained model doesnt exist for VGG16")
                print("Run cifar100.py first")
                exit(0)
            else:
                reqd_variables = tf.get_collection(
                    tf.GraphKeys.GLOBAL_VARIABLES, scope="feature_extractor")
                feature_extractor_saver = tf.train.Saver(reqd_variables)
                print("Restoring feature extractor variables")
                feature_extractor_saver.restore(self.objs['sess'],
                                                "vgg16_cifar100/saved.ckpt")
                print("Done")
Example #13
    def initialize_model(self):
        """
        Initialize model structure and set cost function and training method
        """
        self.y_pred = self.build_extractor(self.X)

        # weighted cross entropy
        self.cost_extractor = cross_entropy_with_clip(self.Y, self.y_pred,
                                                      self.classweights)

        # Do include the following to update BN parameters
        # https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.train_op_extractor = tf.train.AdamOptimizer(
                0.001, beta1=0.9).minimize(self.cost_extractor)

        # start session
        self.sess = tf.InteractiveSession()
        tf.global_variables_initializer().run()

        # saver
        self.saver = tf.train.Saver()

        # summary
        tf.summary.scalar("cross_entropy", self.cost_extractor)
        self.summary_op = tf.summary.merge_all()
        self.writer = tf.summary.FileWriter(self.logdir, self.sess.graph)
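A training step for this model would typically run the train op together with the merged summary and log it through the writer; a minimal sketch (batch_x, batch_y, and step are hypothetical names, not defined in the class above):

# Sketch of one training step with summary logging (hypothetical batch_x/batch_y/step).
summary, _ = self.sess.run(
    [self.summary_op, self.train_op_extractor],
    feed_dict={self.X: batch_x, self.Y: batch_y})
self.writer.add_summary(summary, global_step=step)

Example #14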
    def __init__(self, state_size, window_size, trend, skip, batch_size):
        self.state_size = state_size
        self.window_size = window_size
        self.half_window = window_size // 2
        self.trend = trend
        self.skip = skip
        self.action_size = 3
        self.batch_size = batch_size
        self.memory = deque(maxlen=1000)
        self.inventory = []

        self.gamma = 0.95
        self.epsilon = 0.5
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.999

        tf.reset_default_graph()
        self.sess = tf.InteractiveSession()
        self.X = tf.placeholder(tf.float32, [None, self.state_size])
        self.Y = tf.placeholder(tf.float32, [None, self.action_size])
        feed = tf.layers.dense(
            self.X, 256,
            activation=tf.nn.relu)  # 1 hidden layer with 256 nodes
        ## one hidden layer, dimension hidden layer = 10,  dimension output layer = 1
        # hidden = tf.layers.dense(tf_x, 10, tf.nn.relu)
        # output = tf.layers.dense(hidden, 1, tf.nn.relu)
        self.logits = tf.layers.dense(
            feed, self.action_size
        )  # output layer with 3 nodes, one per action
        self.cost = tf.reduce_mean(tf.square(self.Y -
                                             self.logits))  # L2 cost function
        self.optimizer = tf.train.GradientDescentOptimizer(1e-5).minimize(
            self.cost)  # standard GD optimizer
        self.sess.run(tf.global_variables_initializer())  # initialize all variables
    def _load_frozen_graph(self, frozen_graph_path):
        trt_graph = tf.GraphDef()
        with open(frozen_graph_path, 'rb') as f:
            trt_graph.ParseFromString(f.read())

        self.graph = tf.Graph()
        with self.graph.as_default():
            self.output_node = tf.import_graph_def(
                trt_graph,
                return_elements=[
                    'detection_boxes:0', 'detection_classes:0',
                    'detection_scores:0', 'num_detections:0',
                    'raw_outputs/lstm_c:0', 'raw_outputs/lstm_h:0'
                ])
        self.session = tf.InteractiveSession(graph=self.graph)

        tf_scores = self.graph.get_tensor_by_name('import/detection_scores:0')
        tf_boxes = self.graph.get_tensor_by_name('import/detection_boxes:0')
        tf_classes = self.graph.get_tensor_by_name(
            'import/detection_classes:0')
        tf_num_detections = self.graph.get_tensor_by_name(
            'import/num_detections:0')
        tf_lstm_c = self.graph.get_tensor_by_name(
            'import/raw_outputs/lstm_c:0')
        tf_lstm_h = self.graph.get_tensor_by_name(
            'import/raw_outputs/lstm_h:0')

        self._output_nodes = [
            tf_scores, tf_boxes, tf_classes, tf_num_detections, tf_lstm_c,
            tf_lstm_h
        ]

        self.lstm_c = np.ones((1, 8, 8, 320))
        self.lstm_h = np.ones((1, 8, 8, 320))
Example #16
def main():
    args = parse_arguments()

    # Dataset
    dataset = Dataset(**vars(args))

    # Reset the default graph and set a graph-level seed
    tf.reset_default_graph()
    tf.set_random_seed(9)

    # Model
    model = Model(num_classes=dataset.num_classes, **vars(args))
    model.construct_model()

    # Session
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    tf.local_variables_initializer().run()

    # Prune
    prune.prune(args, model, sess, dataset)

    # Train and test
    train.train(args, model, sess, dataset)
    test.test(args, model, sess, dataset)

    sess.close()
    sys.exit()
    def __init__(self, num_actions):
        self.num_actions = num_actions
        self.epsilon = INITIAL_EPSILON
        self.epsilon_step = (INITIAL_EPSILON -
                             FINAL_EPSILON) / EXPLORATION_STEPS
        self.t = 0

        # Create replay memory
        self.replay_memory = deque()

        # Create q network
        self.s, self.q_values, q_network = self._build_network()
        q_network_weights = q_network.trainable_weights

        # Create target network
        self.st, self.target_q_values, target_network = self._build_network()
        target_network_weights = target_network.trainable_weights

        # Define target network update operation
        self.update_target_network = [
            target_network_weights[i].assign(q_network_weights[i])
            for i in range(len(target_network_weights))
        ]

        # Define loss and gradient update operation
        self.a, self.y, self.loss, self.grads_update = self._build_training_op(
            q_network_weights)

        self.sess = tf.InteractiveSession()
        self.saver = tf.train.Saver(q_network_weights)

        self.sess.run(tf.global_variables_initializer())

        # Initialize target network
        self.sess.run(self.update_target_network)
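During training, the target-network copy defined above is usually refreshed every fixed number of steps; a minimal sketch (TARGET_UPDATE_INTERVAL is an assumed constant, not defined in the snippet):

# Sketch: periodic target-network sync inside the agent's update loop.
# TARGET_UPDATE_INTERVAL is an assumption.
if self.t % TARGET_UPDATE_INTERVAL == 0:
    self.sess.run(self.update_target_network)
self.t += 1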
Example #18
def set_up():
    # variables which we need to fill in when we are ready to compute the graph.
    # We'll pass in the values of the x-axis to a placeholder called X.
    X = tf.placeholder(tf.float32, name='X')

    # And we'll also specify what the y values should be using another placeholder, y.
    Y = tf.placeholder(tf.float32, name='Y')
    sess = tf.InteractiveSession()
    n = tf.random_normal([1000], stddev=0.1).eval()
    # plt.hist(n)

    # We're going to multiply our input by 10 values, creating an "inner layer"
    # of n_neurons neurons.
    n_neurons = 10
    # W = tf.Variable(tf.random_normal([1, n_neurons]), name='W')
    # #
    # # # and allow for n_neurons additions on each of those neurons
    # b = tf.Variable(tf.constant(0, dtype=tf.float32, shape=[n_neurons]), name='b')
    #
    # # Instead of just multiplying, we'll put our n_neuron multiplications through a non-linearity, the tanh function.
    # h = tf.nn.tanh(tf.matmul(tf.expand_dims(X, 1), W) + b, name='h')

    h = linear(tf.expand_dims(X, 1), 1, 10, scope='layer1', activation=tf.nn.tanh)
    h2 = linear(h, 10, 3, scope='layer2', activation=tf.nn.tanh)

    Y_pred = tf.reduce_sum(h2, 1)

    cost = tf.reduce_mean(distance(Y_pred, Y))


    return X, Y, Y_pred, cost
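A short fitting loop over the tensors returned by set_up() could look like the sketch below; the toy data, learning rate, and iteration count are assumptions, and the loop relies on the InteractiveSession created inside set_up() still being the default session:

import numpy as np

# Minimal sketch: fit the small network returned by set_up() on toy data.
X, Y, Y_pred, cost = set_up()
train_step = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)

xs = np.linspace(-3.0, 3.0, 100).astype(np.float32)
ys = np.sin(xs)  # assumed toy target

sess = tf.get_default_session()
sess.run(tf.global_variables_initializer())
for _ in range(500):
    sess.run(train_step, feed_dict={X: xs, Y: ys})
print("final cost:", sess.run(cost, feed_dict={X: xs, Y: ys}))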
Example #19
def valid_nin():
    print(
        bcolors.G +
        "Task : val\nvalidate the pre-trained nin model, should be same as caffe result"
        + bcolors.END)
    pool3 = nin()
    #pool3 = lenet()
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
    sess = tf.InteractiveSession(config=tf.ConfigProto(
        gpu_options=gpu_options))
    init = tf.initialize_all_variables()
    sess.run(init)
    mean = cal_mean()
    data, label = read_cifar10('test')
    total = 0
    correct = 0
    begin = time.time()
    for j in range(len(data) // batch_size):
        batch_x = data[j * batch_size:(j + 1) * batch_size] - mean
        prob = sess.run([pool3],
                        feed_dict={
                            x: batch_x,
                            y: np.ones((batch_size)),
                            keep_prob: 1.0
                        })
        if np.argmax(prob[0]) == label[j]:
            correct += 1
        total += 1
    end = time.time()
    print("acc = %f . %d/%d.  Computing time = %f seconds" %
          (float(correct) / total, correct, total, end - begin))
    def __init__(self, state_size, window_size, trend, skip, batch_size):
        self.state_size = state_size
        self.window_size = window_size
        self.half_window = window_size // 2
        self.trend = trend
        self.skip = skip
        self.action_size = 3
        self.batch_size = batch_size
        self.memory = deque(maxlen=1000)
        self.inventory = []

        self.gamma = 0.95
        self.epsilon = 0.5
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.999

        tf.reset_default_graph()
        self.sess = tf.InteractiveSession()
        self.X = tf.placeholder(tf.float32, [None, self.state_size])
        self.Y = tf.placeholder(tf.float32, [None, self.action_size])
        feed = tf.layers.dense(self.X, 512, activation=tf.nn.relu)
        tensor_action, tensor_validation = tf.split(feed, 2, 1)
        feed_action = tf.layers.dense(tensor_action, self.action_size)
        feed_validation = tf.layers.dense(tensor_validation, 1)
        self.logits = feed_validation + \
            tf.subtract(feed_action, tf.reduce_mean(
                feed_action, axis=1, keep_dims=True))
        self.cost = tf.reduce_mean(tf.square(self.Y - self.logits))
        self.optimizer = tf.train.GradientDescentOptimizer(1e-5).minimize(
            self.cost)
        self.sess.run(tf.global_variables_initializer())
Example #21
def save_config(params_dict, logdir):
    print(f"Saving config to {logdir}")
    text = "{\n\n"
    total_params = len(params_dict)
    for count, key in enumerate(params_dict):
        config_value = str(params_dict[key])
        if re.search('[a-zA-Z]', config_value):
            if config_value.lower() != 'true':
                if config_value.lower() != 'false':
                    if config_value[0] != '[':
                        # TODO: Making a manual exception for parsing epsilon right now since it's the only number in
                        #       scientific notation. Should fix this.
                        if key != "epsilon":
                            config_value = f'"{config_value}"'
        if count == total_params - 1:
            text += f'"{str(key)}"' + ' : ' + config_value + '\n\n'
        else:
            text += f'"{str(key)}"' + ' : ' + config_value + ',\n\n'
    text += '\n\n}'
    sess = tf.InteractiveSession()
    summary_op = tf.summary.text("run_config", tf.convert_to_tensor(text))
    summary_writer = tf.summary.FileWriter(f"{logdir}/config", sess.graph)
    text = sess.run(summary_op)
    summary_writer.add_summary(text, 0)
    summary_writer.flush()
    summary_writer.close()
    tf.reset_default_graph()
    print('Done!')
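For comparison, a TF 2.x version of the same text summary needs no session at all; a hedged sketch reusing the `text` and `logdir` variables built above:

# TF 2.x sketch of the same run_config text summary (no session required).
writer = tf.summary.create_file_writer(f"{logdir}/config")
with writer.as_default():
    tf.summary.text("run_config", text, step=0)
writer.flush()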
Example #22
 def __init__(self, state_size, window_size, trend, skip):
     self.state_size = state_size
     self.window_size = window_size
     self.half_window = window_size // 2
     self.trend = trend
     self.skip = skip
     self.X = tf.placeholder(tf.float32, (None, self.state_size))
     self.REWARDS = tf.placeholder(tf.float32, (None))
     self.ACTIONS = tf.placeholder(tf.int32, (None))
     feed_forward = tf.layers.dense(self.X,
                                    self.LAYER_SIZE,
                                    activation=tf.nn.relu)
     self.logits = tf.layers.dense(feed_forward,
                                   self.OUTPUT_SIZE,
                                   activation=tf.nn.softmax)
     input_y = tf.one_hot(self.ACTIONS, self.OUTPUT_SIZE)
     loglike = tf.log((input_y * (input_y - self.logits) + (1 - input_y) *
                       (input_y + self.logits)) + 1)
     rewards = tf.tile(tf.reshape(self.REWARDS, (-1, 1)),
                       [1, self.OUTPUT_SIZE])
     self.cost = -tf.reduce_mean(loglike * (rewards + 1))
     self.optimizer = tf.train.AdamOptimizer(
         learning_rate=self.LEARNING_RATE).minimize(self.cost)
     self.sess = tf.InteractiveSession()
     self.sess.run(tf.global_variables_initializer())
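Acting with this policy network typically means evaluating the softmax output for the current state and sampling an action from it; a minimal sketch (the `state` vector is an assumption, and np is numpy):

# Sketch: sample an action from the policy's softmax output for one state.
# `state` is an assumed 1-D feature vector of length state_size.
probs = self.sess.run(self.logits, feed_dict={self.X: state.reshape(1, -1)})[0]
probs = probs / probs.sum()  # guard against float32 rounding
action = np.random.choice(self.OUTPUT_SIZE, p=probs)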
Example #23
def main():
 
    sess = tf.InteractiveSession()
 
    inp, out = createGraph()
 
    trainGraph(inp, out, sess)
def make_prediction(img, MODEL_PATH):
    tf.enable_eager_execution()
    to_predict = preprocess(img)
    x_img = to_predict.shape[0]
    y_img = to_predict.shape[1]
    if len(to_predict.shape) == 3:
        z_img = to_predict.shape[2]
    else:
        z_img = 1
    PIXEL_COUNT = x_img * y_img * z_img
    LABEL_COUNT = 1
    to_predict_reshape = np.reshape(to_predict, (1, x_img * y_img * z_img))
    g = tf.Graph()
    with g.as_default():
        session = tf.InteractiveSession()
        model = build_model(PIXEL_COUNT, LABEL_COUNT)
        saver = tf.train.Saver()
        saver.restore(session, MODEL_PATH)
        predictions = model.output.eval(
            session=session,
            feed_dict={model.x_placeholder: to_predict_reshape})
        session.close()
    box_image, x, y, width, height = plot_images(
        to_predict, (predictions + 1) * (64, 32, 64, 32))
    plate = result(to_predict, x, y, height, width)
    return box_image, plate
Example #25
 def __init__(self, state_size, window_size, trend, skip):
     self.state_size = state_size
     self.window_size = window_size
     self.half_window = window_size // 2
     self.trend = trend
     self.INITIAL_FEATURES = np.zeros((4, self.state_size))
     self.skip = skip
     tf.reset_default_graph()
     self.actor = Actor('actor-original', self.state_size, self.OUTPUT_SIZE,
                        self.LAYER_SIZE)
     self.actor_target = Actor('actor-target', self.state_size,
                               self.OUTPUT_SIZE, self.LAYER_SIZE)
     self.critic = Critic('critic-original', self.state_size,
                          self.OUTPUT_SIZE, self.LAYER_SIZE,
                          self.LEARNING_RATE)
     self.critic_target = Critic('critic-target', self.state_size,
                                 self.OUTPUT_SIZE, self.LAYER_SIZE,
                                 self.LEARNING_RATE)
     self.grad_critic = tf.gradients(self.critic.logits, self.critic.Y)
     self.actor_critic_grad = tf.placeholder(tf.float32,
                                             [None, self.OUTPUT_SIZE])
     weights_actor = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                       scope='actor')
     self.grad_actor = tf.gradients(self.actor.logits, weights_actor,
                                    -self.actor_critic_grad)
     grads = zip(self.grad_actor, weights_actor)
     self.optimizer = tf.train.AdamOptimizer(
         self.LEARNING_RATE).apply_gradients(grads)
     self.sess = tf.InteractiveSession()
     self.sess.run(tf.global_variables_initializer())
Example #26
    def __init__(self, state_size, window_size, trend, skip):
        self.state_size = state_size
        self.window_size = window_size
        self.half_window = window_size // 2
        self.trend = trend
        self.skip = skip
        tf.reset_default_graph()
        self.X = tf.placeholder(tf.float32, (None, self.state_size))
        self.Y = tf.placeholder(tf.float32, (None, self.state_size))
        self.ACTION = tf.placeholder(tf.float32, (None))
        self.REWARD = tf.placeholder(tf.float32, (None))
        self.batch_size = tf.shape(self.ACTION)[0]

        with tf.variable_scope('curiosity_model'):
            action = tf.reshape(self.ACTION, (-1, 1))
            state_action = tf.concat([self.X, action], axis=1)
            save_state = tf.identity(self.Y)

            feed = tf.layers.dense(state_action, 32, activation=tf.nn.relu)
            self.curiosity_logits = tf.layers.dense(feed, self.state_size)
            self.curiosity_cost = tf.reduce_sum(
                tf.square(save_state - self.curiosity_logits), axis=1)

            self.curiosity_optimizer = tf.train.RMSPropOptimizer(self.LEARNING_RATE)\
            .minimize(tf.reduce_mean(self.curiosity_cost))

        total_reward = tf.add(self.curiosity_cost, self.REWARD)

        with tf.variable_scope("q_model"):
            with tf.variable_scope("eval_net"):
                x_action = tf.layers.dense(self.X, 128, tf.nn.relu)
                self.logits = tf.layers.dense(x_action, self.OUTPUT_SIZE)

            with tf.variable_scope("target_net"):
                y_action = tf.layers.dense(self.Y, 128, tf.nn.relu)
                y_q = tf.layers.dense(y_action, self.OUTPUT_SIZE)

            q_target = total_reward + self.GAMMA * tf.reduce_max(y_q, axis=1)
            action = tf.cast(self.ACTION, tf.int32)
            action_indices = tf.stack(
                [tf.range(self.batch_size, dtype=tf.int32), action], axis=1)
            q = tf.gather_nd(params=self.logits, indices=action_indices)
            self.cost = tf.losses.mean_squared_error(labels=q_target,
                                                     predictions=q)
            self.optimizer = tf.train.RMSPropOptimizer(
                self.LEARNING_RATE).minimize(
                    self.cost,
                    var_list=tf.get_collection(
                        tf.GraphKeys.TRAINABLE_VARIABLES, "q_model/eval_net"))

        t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                     scope='q_model/target_net')
        e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                     scope='q_model/eval_net')
        self.target_replace_op = [
            tf.assign(t, e) for t, e in zip(t_params, e_params)
        ]

        self.sess = tf.InteractiveSession()
        self.sess.run(tf.global_variables_initializer())
Example #27
def main():
    # create session
    sess = tf.InteractiveSession()
    # input layer and output layer by creating graph
    inp, out = createGraph()
    # train our graph on input and output with session variables
    trainGraph(inp, out, sess)
Example #28
    def optimise(self, lr, iterations, verbose):

        self.sess = tf.InteractiveSession()

        objective = self.objective  # Only simple likelihood objectives are supported here, but a KL divergence could also be used

        optimiser = tf.train.AdamOptimizer(
            learning_rate=lr).minimize(objective)

        self.sess.run(tf.global_variables_initializer())

        progress = int(np.ceil(iterations / 10))

        for i in tqdm(range(iterations), leave=False):

            _, loss = self.sess.run((optimiser, objective), feed_dict={})

            if verbose and (i % progress) == 0:

                print('  opt iter {:5}: objective = {}'.format(i, loss))

        print('Noise Variance:', self.sess.run(self.noise_var))

        for i in range(len(self.hparams)):

            print(f'{self.hparamd[i]}:', self.sess.run(self.hparams[i]))
Example #29
def trainer(model_params):
    """Train a sketch-rnn model."""
    np.set_printoptions(precision=8, edgeitems=6, linewidth=200, suppress=True)

    tf.logging.info('sketch-rnn')
    tf.logging.info('Hyperparams:')
    tf.logging.info('Loading data files.')
    datasets = load_dataset(FLAGS.data_dir, model_params)

    train_set = datasets[0]
    valid_set = datasets[1]
    test_set = datasets[2]
    model_params = datasets[3]
    eval_model_params = datasets[4]

    reset_graph()
    model = sketch_rnn_model.Model(model_params)
    eval_model = sketch_rnn_model.Model(eval_model_params, reuse=True)

    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())

    if FLAGS.resume_training:
        load_checkpoint(sess, FLAGS.log_root)

    # Write config file to json file.
    tf.gfile.MakeDirs(FLAGS.log_root)
    with tf.gfile.Open(os.path.join(FLAGS.log_root, 'model_config.json'),
                       'w') as f:
        json.dump(list(model_params.values()), f, indent=True)

    train(sess, model, eval_model, train_set, valid_set, test_set)
Example #30
    def __init__(self, env):
        #Make Session
        sess = tf.InteractiveSession()

        self.env = env
        self.lr = 1e-4
        #self.critic_lr=0.001

        self.gamma = 0.99
        self.gae = 0.95
        self.net = ACModel(
            env,
            'net',
        )
        self.old_net = ACModel(env, 'old')

        #PPO Parameters
        self.ppo_epochs = 10
        self.batch_size = 64
        self.clip_range = 0.2

        #For entropy for exploration
        self.ent_coef = 0.01

        self.saver = tf.train.Saver(max_to_keep=5000)
        self.build_update()
        self.build_update_models()

        self.utils = Utils()
        tf.get_default_session().run(tf.global_variables_initializer())