Net.add(LSTM(20, input_shape=(None, 1),
#             activation="linear",
#             recurrent_activation="linear",
             return_sequences=True))  # LSTM layer with 20 units
Net.add(Dense(1,activation="linear"))

Net.compile(loss='mean_squared_error',
              optimizer='adam')


# In[174]:


# assuming we can take the solution Xs from above, 
# let's see what the network makes of this!
X1=Net.predict_on_batch(Xs[0,0,:,:][:,:,None]) # needs input of shape [batchsize,timesteps,1]


# In[103]:


np.shape(X1)


# In[104]:


sample=3
plt.plot(ts,Xs[0,1,sample,:])
plt.plot(ts,X1[sample,:,0],color="orange")
plt.show()
    # store all the input images for a whole trajectory,
    # so we can afterwards make the training step
    # on this set of images as one batch!
    input_images = np.zeros([nsteps, M, M, 3])

    # go through all time steps
    for t in range(nsteps):
        # Obtain the policy prediction given the current
        # situation:
        # the maze map is already stored
        # inside input_image[0,:,:,0]
        # but the treasure map and
        # the robot position need to be updated
        input_image[0, :, :, 1] = reward  # current treasure map!
        input_image[0, jx, jy, 2] = 1  # indicate position of robot!
        # now: evaluate policy network:
        policy_p = Policy.predict_on_batch(input_image)[0]

        # now policy_p is an array of 4 probabilities
        # [0] was needed to get rid of first index, which
        # would be the batchsize (but we have a batchsize of 1)

        # make a random step, according to the policy distribution
        p = np.random.uniform()
        cumulative_distribution = np.cumsum(policy_p)
        for pick in range(4):
            if p < cumulative_distribution[pick]:
                break
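        # note: the loop above is equivalent to sampling the action directly,
        # e.g. pick = np.random.choice(len(policy_p), p=policy_p)
        # (assuming policy_p is a normalized probability distribution)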

        # record the move
        states[t, 0] = jx
        states[t, 1] = jy
# Example #3
for j in range(steps):
    y_in, y_target = my_generator1D(batchsize, x)
    costs[j], accuracy[j] = Net.train_on_batch(y_in, y_target)
    if j % skipsteps == 0:
        clear_output(wait=True)
        plt.plot(costs, color="darkblue", label="cost")
        plt.plot(accuracy, color="orange", label="accuracy")
        plt.legend()
        plt.show()

# In[37]:

# plot some examples:

y_pred = Net.predict_on_batch(y_in)
n_samples = 10
fig, ax = plt.subplots(ncols=n_samples, nrows=1, figsize=(10, 1))
Name = {}
Name[True] = "L"  # Lorentz
Name[False] = "G"  # Gauss
for j in range(n_samples):
    ax[j].plot(y_in[j, :])
    ax[j].set_ylim([-0.1, 1])
    ax[j].axis('off')
    ax[j].set_title(Name[y_target[j, 0] > 0.5] + "/" +
                    Name[y_pred[j, 0] > 0.5])
plt.show()

print("True Category / Network Prediction")
def main():
    max_features = 20000
    # cut texts after this number of words
    # (among top max_features most common words)
    max_len = 100
    batch_size = 32
    tcn_num_filters = 10

    print('Loading data...')
    (x_train, y_train), (x_test,
                         y_test) = imdb.load_data(num_words=max_features,
                                                  index_from=index_from_)
    print(len(x_train), 'train sequences')
    print(len(x_test), 'test sequences')

    x_val = [
        encode_text('The movie was very good. I highly recommend.'
                    ),  # will be at the end.
        encode_text(' '.join(["worst"] * 100)),
        encode_text(
            "Put all speaking her delicate recurred possible. "
            "Set indulgence discretion insensible bed why announcing. "
            "Middleton fat two satisfied additions. "
            "So continued he or commanded household smallness delivered. "
            "Door poor on do walk in half. "
            "Roof his head the what. "
            "Society excited by cottage private an it seems. "
            "Fully begin on by wound an. "
            "The movie was very good. I highly recommend. "
            "At declared in as rejoiced of together. "
            "He impression collecting delightful unpleasant by prosperous as on. "
            "End too talent she object mrs wanted remove giving. "
            "Man request adapted spirits set pressed. "
            "Up to denoting subjects sensible feelings it indulged directly.")
    ]

    y_val = [1, 0, 1]

    print('Pad sequences (samples x time)')
    x_train = sequence.pad_sequences(x_train, maxlen=max_len)
    x_test = sequence.pad_sequences(x_test, maxlen=max_len)
    x_val = sequence.pad_sequences(x_val, maxlen=max_len)
    print('x_train shape:', x_train.shape)
    print('x_test shape:', x_test.shape)
    print('x_val shape:', x_val.shape)
    y_train = np.array(y_train)
    y_test = np.array(y_test)
    y_val = np.array(y_val)

    x_val[x_val > max_features] = 2  # oov.

    for i in range(10):
        print(f'x_test[{i}]=', end=' | ')
        print_text(x_test[i])

    for i in range(len(x_val)):
        print(f'x_val[{i}]=', end=' | ')
        print_text(x_val[i])

    temporal_conv_net = TCN(nb_filters=tcn_num_filters,
                            kernel_size=7,
                            dilations=[1, 2, 4, 8, 16, 32])

    print(temporal_conv_net.receptive_field)

    model = Sequential()
    model.add(Embedding(max_features, 128, input_shape=(max_len, )))
    model.add(temporal_conv_net)
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid'))

    model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])

    tcn_layer_outputs = list(temporal_conv_net.layers_outputs)

    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=4,
              validation_data=[x_test, y_test])

    if os.path.exists('acts'):
        shutil.rmtree('acts')
    os.makedirs('acts')

    print(model.predict_on_batch(x_val))
    print(y_val)

    visualize(model, x_test[0:10], max_len, tcn_num_filters, tcn_layer_outputs,
              'x_test')
    visualize(model, x_val, max_len, tcn_num_filters, tcn_layer_outputs,
              'x_val')
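
# The helpers encode_text and print_text and the constant index_from_ are used
# in main() above but are not defined in this excerpt. A rough sketch of what
# they might look like, assuming the usual Keras IMDB index conventions
# (0 = pad, 1 = start, 2 = oov); these are guesses, not the original helpers:
index_from_ = 3  # assumed; must match the index_from passed to imdb.load_data

_word_index = imdb.get_word_index()
_word_to_id = {w: i + index_from_ for w, i in _word_index.items()}
_id_to_word = {i: w for w, i in _word_to_id.items()}


def encode_text(text):
    tokens = text.lower().replace('.', ' ').split()
    return [1] + [_word_to_id.get(w, 2) for w in tokens]


def print_text(ids):
    print(' '.join(_id_to_word.get(i, '?') for i in ids if i > 2))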
# Example #5
        final_accuracy_Ligand_gen = history.history['val_accuracy'][-1]
        final_val_loss_Ligand_gen = history.history['val_loss'][-1]

        # Using the trained model, the following code sequentially generates
        # 100 novel molecules, represented as SMILES strings, and appends
        # them to a list. A tqdm progress bar shows the progress.
        start = 'G'
        num = 100
        Ligand_gen_smiles = []
        for _ in tqdm(range(num)):
            sequence = start
            while (len(st.interpret(sequence)) <=
                   max_gen_length_smiles) and (sequence[-1] != 'E'):
                x = st.hot_encoder(st.interpret(sequence))
                preds = model.predict_on_batch(x)[0][-1]
                stretched = np.log(preds) / sampling_temp
                stretched_probs = np.exp(stretched) / np.sum(np.exp(stretched))
                next_character = np.random.choice(range(len(stretched)),
                                                  p=stretched_probs)
                sequence += st.table[next_character]
            sequence = sequence[1:].rstrip('E')
            Ligand_gen_smiles.append(sequence)

        # Write the generated molecules to an .smi file, which is used for
        # metrics calculations in an IPython notebook
        new_file = os.path.join('databases/output_molecules/Ligand_gen_smiles',
                                'Ligand_gen_smiles.smi')
        fi = open(new_file, "w")
        for smi in Ligand_gen_smiles:
            fi.write(str(smi) + '\n')
# Example #6
costs = np.zeros(steps)
accuracy = np.zeros(steps)
skipsteps = 10

for j in range(steps):
    y_in, y_target = my_generator1D(batchsize, x)
    costs[j], accuracy[j] = Net.train_on_batch(y_in, y_target)
    if j % skipsteps == 0:
        clear_output(wait=True)
        plt.plot(costs, color="darkblue", label="costs")
        plt.plot(accuracy, color="orange", label="accuracy")
        plt.legend()
        plt.show()

# plot some examples:

y_pred = np.array(Net.predict_on_batch(y_in))
n_samples = 10
fig, ax = plt.subplots(ncols=n_samples, nrows=1, figsize=(10, 1))
Name = {}
Name[True] = "L"  # Lorentz
Name[False] = "G"  # Gauss
for j in tqdm(range(n_samples)):
    ax[j].plot(y_in[j, :])
    ax[j].set_ylim([-0.1, 1])
    ax[j].axis("off")
    ax[j].set_title(Name[y_target[j, 0] > 0.5] + "/" +
                    Name[y_pred[j, 0] > 0.5])
plt.show()

print("True Category / Network Prediction")
# Example #7
test_input[:,int(nsteps/2):,0]=0.0 # delete second half!
test_correct_output=np.copy(Xs[0,0,:,:][:,:,None]) # shape [batchsize,timesteps,1]

for j in range(training_steps):
    X0=np.random.randn(2,number_particles,batchsize) # random initial conditions
    Xs,ts=solve(X0=X0,rhs=multiple_coupled_oscillators_parallel_rhs,T=2*T_half,nsteps=nsteps) # get solutions
    the_input=np.copy(Xs[0,0,:,:][:,:,None])
    the_input[:,int(nsteps/2):,0]=0.0 # delete second half!
    costs[j]=Net.train_on_batch(the_input,Xs[0,0,:,:][:,:,None]) # train x_0(t) (half)->x_0(t) (full)
    # needs input and output of shape [batchsize,timesteps,1]
    if j%skip_steps==0 or j==training_steps-1:
        clear_output(wait=True)
        fig=plt.figure(constrained_layout=True,figsize=(8,4))
        gs=fig.add_gridspec(ncols=4,nrows=2)
        cost_plot=fig.add_subplot(gs[0:2,0:2])
        cost_plot.plot(costs)
        test_output=Net.predict_on_batch(test_input) # get some test predictions
        for n in range(4):
            theplot=fig.add_subplot(gs[n%2,2+int(n/2)])
            theplot.plot(ts,test_input[n,:,0],color="black") # particle 0 for half trajectory
            theplot.plot(ts,test_correct_output[n,:,0],color="blue") # particle 0 for full trajectory
            theplot.plot(ts,test_output[n,:,0],color="orange") # NN-predicted full trajectory
        plt.show()


# This is pretty good! The network had to do several things:
# 
# - understand the coupled equations of motion for two oscillators simply by observing trajectories of one of the two oscillators
# - understand how to indirectly learn about the initial conditions for *both* oscillators by only observing the first one for some time
# - then solve the coupled equations to do the prediction
# 
# (a possible form of the coupled-oscillator right-hand side is sketched below)
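
# multiple_coupled_oscillators_parallel_rhs is not shown in this excerpt.
# A minimal sketch of what it might look like, assuming X has shape
# [2, number_particles, batchsize] (X[0] = positions, X[1] = velocities) and a
# simple spring coupling between the oscillators; the exact signature and
# constants are guesses:
def multiple_coupled_oscillators_parallel_rhs(X, t=None, k=1.0, coupling=0.5):
    x, v = X[0], X[1]
    # each oscillator feels its own restoring force plus a coupling
    # to the mean displacement of all oscillators
    a = -k * x - coupling * (x - np.mean(x, axis=0, keepdims=True))
    return np.array([v, a])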
# Example #8
class MazeRL():
    # an empty playground... (even that turns out to be difficult enough currently)
    def empty_maze(self, width, height):
        """
        Generate an empty "maze" (just a wall around a free field)
        """
        mymaze = np.zeros([width, height], dtype='int')
        mymaze[:, 0] = 1
        mymaze[:, -1] = 1
        mymaze[0, :] = 1
        mymaze[-1, :] = 1
        return (mymaze)

    # Maze generation algorithm from wikipedia
    # the code was removed in January 2020, but you can still
    # access it under this link:
    # https://en.wikipedia.org/w/index.php?title=Maze_generation_algorithm&oldid=930153705

    def maze(self, width=81, height=51, complexity=.75, density=.75):
        """
        Generate a maze. Algorithm taken from wikipedia.
        https://en.wikipedia.org/w/index.php?title=Maze_generation_algorithm&oldid=930153705
        """
        # Only odd shapes
        shape = ((height // 2) * 2 + 1, (width // 2) * 2 + 1)
        # Adjust complexity and density relative to maze size
        complexity = int(complexity *
                         (5 * (shape[0] + shape[1])))  # number of components
        density = int(density * ((shape[0] // 2) *
                                 (shape[1] // 2)))  # size of components
        # Build actual maze
        Z = np.zeros(shape, dtype=bool)
        # Fill borders
        Z[0, :] = Z[-1, :] = 1
        Z[:, 0] = Z[:, -1] = 1
        # Make aisles
        for i in range(density):
            x, y = np.random.randint(0, shape[1] // 2) * 2, np.random.randint(
                0, shape[0] // 2) * 2  # pick a random position
            Z[y, x] = 1
            for j in range(complexity):
                neighbours = []
                if x > 1: neighbours.append((y, x - 2))
                if x < shape[1] - 2: neighbours.append((y, x + 2))
                if y > 1: neighbours.append((y - 2, x))
                if y < shape[0] - 2: neighbours.append((y + 2, x))
                if len(neighbours):
                    y_, x_ = neighbours[np.random.randint(
                        0, len(neighbours))]
                    if Z[y_, x_] == 0:
                        Z[y_, x_] = 1
                        Z[y_ + (y - y_) // 2, x_ + (x - x_) // 2] = 1
                        x, y = x_, y_
        return Z

    def setup(self,
              M=7,
              num_chests=1,
              delete_treasure=False,
              single_maze=True,
              batchsize=50,
              test_batchsize=5,
              nsteps=15,
              choose_random_positions_for_single_maze=False,
              place_chest=None,
              empty_maze=False,
              keep_maze=False,
              punish_wall_hit=0.5,
              train_for_action_map=False):
        """
        Setup everything in advance of running one or several
        trainings. The maze size M must be odd.
        You could re-run this to change some options like
        batchsize or even nsteps (number of time steps) and
        then continue training the same network.
        """
        if M % 2 == 0:
            raise ValueError("Maze size must be odd, but was M=" + str(M))

        # the five directions of motion [delta_jx,delta_jy]:
        self.directions = np.array([[1, 0], [0, -1], [0, 1], [-1, 0], [0, 0]])

        # keep all the global options that have been set
        self.M = M
        self.num_chests = num_chests
        self.delete_treasure = delete_treasure
        self.single_maze = single_maze
        self.batchsize = batchsize
        self.test_batchsize = test_batchsize
        self.nsteps = nsteps
        self.num_actions = len(self.directions)
        self.choose_random_positions_for_single_maze = choose_random_positions_for_single_maze
        self.place_chest = place_chest
        self.use_empty_maze = empty_maze  # note: don't shadow the empty_maze() method
        self.punish_wall_hit = punish_wall_hit
        self.train_for_action_map = train_for_action_map

        # storing all the actions of the current trajectory
        self.actions = np.zeros([batchsize, nsteps], dtype='int')
        self.position = np.zeros([batchsize, nsteps, 2], dtype='int')

        # same for test batch (which is smaller):
        self.test_actions = np.zeros([test_batchsize, nsteps], dtype='int')
        self.test_position = np.zeros([test_batchsize, nsteps, 2], dtype='int')

        # we need to store all the input images
        # for a whole trajectory, for convenience
        # so we can afterwards make the training step
        # on this set of images as one batch!
        self.input_images = np.zeros([batchsize, nsteps, M, M, 3],
                                     dtype='float')
        self.test_input_images = np.zeros([test_batchsize, nsteps, M, M, 3],
                                          dtype='float')

        # the current position, for all samples of the batch in parallel:
        self.jx, self.jy = np.zeros([batchsize],
                                    dtype='int'), np.zeros([batchsize],
                                                           dtype='int')
        self.test_jx, self.test_jy = np.zeros(
            [test_batchsize], dtype='int'), np.zeros([test_batchsize],
                                                     dtype='int')

        if not keep_maze:
            # the maze maps, for all samples in parallel:
            self.world = np.zeros([batchsize, M, M], dtype='int')
            self.test_world = np.zeros([test_batchsize, M, M], dtype='int')

        # store cost function for the training step (out of curiosity)
        self.costs = None  # will be initialized in run

        # useful for some advanced array indexing:
        self.allsamples = np.arange(0, batchsize)
        self.test_allsamples = np.arange(0, test_batchsize)

        # generate test mazes on which the strategy will be
        # repeatedly illustrated:
        if not keep_maze:
            self.test_reward, self.test_input_image = self.prepare_batch(
                batchsize=test_batchsize,
                world=self.test_world,
                jx=self.test_jx,
                jy=self.test_jy)

        # special case: a single maze (not a fresh maze for every sample)
        if not keep_maze:
            if single_maze:
                if place_chest is not None:
                    self.test_reward[0, :, :] = 0
                    self.test_reward[0, place_chest[0], place_chest[1]] = 1.0

                for n in range(1, test_batchsize):
                    self.test_reward[n, :, :] = self.test_reward[0, :, :]
                    self.test_input_image[n, :, :, :] = self.test_input_image[
                        0, :, :, :]
                    self.test_world[n, :, :] = self.test_world[0, :, :]
                self.reward, self.input_image = self.prepare_batch(
                    batchsize=batchsize,
                    world=self.world,
                    jx=self.jx,
                    jy=self.jy)
                for n in range(batchsize):
                    self.reward[n, :, :] = self.test_reward[0, :, :]
                    self.input_image[n, :, :, :] = self.test_input_image[
                        0, :, :, :]
                    self.world[n, :, :] = self.test_world[0, :, :]
            w = np.where(self.test_reward[0, :, :] > 0.5)
            self.chest_x = w[0][0]  # position of (first) treasure chest
            self.chest_y = w[1][0]

        self.orig_test_world = np.copy(self.test_world)
        self.orig_test_reward = np.copy(self.test_reward)

    def compile_net(self, eta=0.0001, try_adam=False):
        """
        Compile the network. You could re-compile this
        with a different learning rate.
        """
        # we have to use categorical cross entropy for the
        # policy gradient update rule!
        # also, we should not use adam (which tries to be adaptive),
        # but just plain old simple stochastic gradient descent,
        # if we want the policy gradient update to be correctly
        # implemented!
        if try_adam:
            self.Policy.compile(loss='categorical_crossentropy',
                                optimizer=optimizers.Adam(learning_rate=eta))
        else:
            self.Policy.compile(loss='categorical_crossentropy',
                                optimizer=optimizers.SGD(learning_rate=eta,
                                                         clipnorm=1.0))

    def create_network(self):
        # first define the policy neural network
        self.Policy = Sequential()
        self.Policy.add(
            Conv2D(self.num_channels,
                   kernel_size=self.kernel_size,
                   input_shape=(self.M, self.M, 3),
                   activation="elu",
                   padding="same"))
        self.Policy.add(
            Conv2D(self.num_channels,
                   kernel_size=self.kernel_size,
                   activation="elu",
                   padding="same"))
        self.Policy.add(
            Conv2D(self.num_channels,
                   kernel_size=self.kernel_size,
                   activation="elu",
                   padding="same"))
        self.Policy.add(
            Conv2D(self.num_channels,
                   kernel_size=self.kernel_size,
                   activation="elu",
                   padding="same"))
        if not self.train_for_action_map:
            self.Policy.add(Flatten())
            self.Policy.add(Dense(self.num_actions, activation="softmax"))
        else:
            self.Policy.add(
                Conv2D(self.num_actions,
                       kernel_size=self.kernel_size,
                       activation="softmax",
                       padding="same"))
        # the output layer with the probabilities

    def __init__(self,
                 M=7,
                 eta=0.0001,
                 num_chests=1,
                 delete_treasure=False,
                 single_maze=True,
                 batchsize=50,
                 test_batchsize=5,
                 nsteps=15,
                 try_adam=False,
                 choose_random_positions_for_single_maze=False,
                 place_chest=None,
                 kernel_size=5,
                 num_channels=5,
                 empty_maze=False,
                 punish_wall_hit=0.5,
                 train_for_action_map=False,
                 extra_visualizer=None):
        """
        Initialize a neural network for the policy gradient applied to
        arbitrary mazes. This also initializes all kinds of global variables
        that will be needed for the training.
        
        See also: setup (which will be called from here).
        
        If you called this like mymaze=MazeRL(...), then afterwards
        the network will be available as mymaze.Policy. You could change
        that to replace it by your own hand-crafted network (do that
        before starting training).
        """

        self.try_adam = try_adam
        self.kernel_size = kernel_size
        self.num_channels = num_channels
        self.extra_visualizer = extra_visualizer

        # the setup
        self.setup(M=M,
                   num_chests=num_chests,
                   delete_treasure=delete_treasure,
                   single_maze=single_maze,
                   batchsize=batchsize,
                   test_batchsize=test_batchsize,
                   nsteps=nsteps,
                   choose_random_positions_for_single_maze=
                   choose_random_positions_for_single_maze,
                   place_chest=place_chest,
                   empty_maze=empty_maze,
                   punish_wall_hit=punish_wall_hit,
                   train_for_action_map=train_for_action_map)

        self.create_network()

        self.compile_net(eta=eta, try_adam=try_adam)

    def prepare_batch(self, batchsize, world, jx, jy):
        """
        prepare one batch, return:
            reward,input_image
        The arrays world, jx and jy must already exist; they will be
        filled with values here.
        """

        M = self.M
        use_empty_maze = self.use_empty_maze

        # a map of rewards (the 'chests' are here!)
        reward = np.zeros([batchsize, M, M], dtype='float')

        for sample in range(batchsize):
            # make a new maze (a new one in each trial!)
            if not use_empty_maze:
                world[sample, :, :] = np.array(self.maze(width=M, height=M),
                                               dtype='int')
            else:
                world[sample, :, :] = np.array(self.empty_maze(width=M,
                                                               height=M),
                                               dtype='int')
            # random selection of reward sites (treasure chests)
            for n in range(self.num_chests):
                while True:
                    jx_target, jy_target = np.random.randint(
                        low=2, high=M - 2, size=2)  # avoid close to boundary!
                    if world[sample, jx_target,
                             jy_target] == 0:  # empty, keep it!
                        reward[sample, jx_target, jy_target] += 1
                        break

        # pick random initial position:
        for sample in range(batchsize):
            while True:
                jx_try, jy_try = np.random.randint(
                    low=2, high=M - 2, size=2)  # avoid close to boundary!
                if world[sample, jx_try, jy_try] == 0:
                    jx[sample], jy[sample] = jx_try, jy_try
                    break

        # prepare an input image for the network
        input_image = np.zeros([batchsize, M, M, 3], dtype='float')
        input_image[:, :, :, 0] = world
        # we can set this now, since it will not change during this trial

        return (reward, input_image)

    def run_one_trial(self, input_image, input_images, reward, jx, jy, actions,
                      position, world, allsamples, batchsize):
        """
        Run one trial starting at position jx,jy, storing returns in R, storing
        actions and position, using input_image and world.
        This will return R.
        """
        # set return to zero for this trajectory:
        R = np.zeros([batchsize])

        # go through all time steps
        for t in range(self.nsteps):
            # Obtain the policy prediction given the current
            # situation:
            # the maze map is already stored
            # inside input_image[0,:,:,0]
            # but the treasure map and
            # the robot position need to be updated
            input_image[:, :, :, 1] = reward  # current treasure map!
            input_image[allsamples, jx, jy, 2] = 1
            # last line indicates position of robot(s)! (for whole batch)
            # note: this uses advanced numpy indexing, so we use
            # three equal-length integer arrays a,b,c to do the
            # equivalent of
            #  for k in range(n):
            #      input_image[a[k],b[k],c[k],2]=1
            #
            # now: evaluate policy network on the whole batch:
            # this is where we gain efficiency!
            policy_p = self.Policy.predict_on_batch(input_image)

            if self.train_for_action_map:
                policy_p = policy_p[allsamples, jx, jy, :]

            #print(policy_p)
            if np.any(policy_p > 1.001):
                print("Oops, policy out of range:", policy_p)

            # now policy_p is an array of [batchsize, num_actions] probabilities

            # make a random step, according to the policy distribution
            p = np.random.uniform(size=batchsize)
            cumulative_distribution = np.cumsum(policy_p,
                                                axis=1)  # note axis argument
            pick = np.argmax(cumulative_distribution > p[:, None], axis=1)
            # will give the index of the first entry that exceeds p, which is
            # exactly what we need (pick will still be a 1D array of 'batchsize' length)

            # record the move
            #print(pick[0],jx[0],jy[0])
            actions[:, t] = pick
            position[:, t, 0] = jx
            position[:, t, 1] = jy
            input_images[:,
                         t, :, :, :] = np.copy(input_image)  # store for later

            # now make the move
            jx_new, jy_new = jx + self.directions[
                pick][:, 0], jy + self.directions[pick][:, 1]

            # really make it if there is no wall:
            # again, we do this for all samples in parallel,
            # thanks to advanced numpy array indexing
            can_move = np.array(world[allsamples, jx_new, jy_new] == 0,
                                dtype='int')  # 1 if we can move
            # delete the old position marker only if we can move
            input_image[allsamples, jx, jy, 2] -= can_move
            jx, jy = jx * (1 - can_move) + jx_new * can_move, jy * (
                1 - can_move) + jy_new * can_move

            # get a reward if on a treasure chest!
            r = reward[allsamples, jx, jy]  # array of 'batchsize' length
            if self.delete_treasure:
                reward[allsamples, jx, jy] -= 1 * (r > 0)  # delete treasure!

            # punish a bit whenever we moved into a wall!
            r -= self.punish_wall_hit * (1 - can_move)
            R += r

        return (R)

    def run(self,
            ntrials=1000,
            skipsteps=20,
            do_visualize=True,
            do_show_test_batch=False,
            do_plot_trajectories=True,
            P_cmap='viridis'):
        """
        Run the policy gradient training for 'ntrials' training
        trials, each containing a number of trajectories given
        by 'batchsize' (as initialized when first creating the
        MazeRL object; you can reset that using 'setup').
        
        Visualize every skipsteps steps.
        
        Every call to this method trains further, and it does not
        delete the old stored cost values, so it keeps displaying
        the overall progress in training from the start.
        """
        self.P_cmap = P_cmap

        if self.costs is not None:
            self.costs = np.append(self.costs, np.zeros(ntrials))
            self.Returns = np.append(self.Returns,
                                     np.zeros([self.batchsize, ntrials]),
                                     axis=1)
        else:
            self.costs = np.zeros(ntrials)
            self.Returns = np.zeros([self.batchsize, ntrials])

        # try many trajectories:
        for trial in range(ntrials):
            # prepare the mazes of the batch (and starting positions, and treasure distribution):
            if not self.single_maze:  # generate a fresh maze for every sample in the batch
                self.reward, self.input_image = self.prepare_batch(
                    batchsize=self.batchsize,
                    world=self.world,
                    jx=self.jx,
                    jy=self.jy)
            else:  # keep the one single maze
                for sample in range(self.batchsize):
                    self.jx[sample], self.jy[sample] = 1, 1
                    self.input_image[sample, :, :, 2] = 0
                    self.input_image[sample, self.jx[sample], self.jy[sample],
                                     2] = 1
                    # the following is useful for starting from random
                    # positions, but within the same maze
                    if self.choose_random_positions_for_single_maze:
                        while True:
                            jx_try, jy_try = np.random.randint(self.M, size=2)
                            if self.world[sample, jx_try, jy_try] == 0:
                                self.jx[sample], self.jy[
                                    sample] = jx_try, jy_try
                                self.input_image[sample, :, :, 2] = 0
                                self.input_image[sample, self.jx[sample],
                                                 self.jy[sample],
                                                 2] = 1  # place robot
                                break

            # run a single full trial
            # as usual, all arrays are passed by reference and will be filled
            # with new values in this routine
            self.R = self.run_one_trial(input_image=self.input_image,
                                        input_images=self.input_images,
                                        reward=self.reward,
                                        jx=self.jx,
                                        jy=self.jy,
                                        actions=self.actions,
                                        position=self.position,
                                        world=self.world,
                                        allsamples=self.allsamples,
                                        batchsize=self.batchsize)

            # store the return
            self.Returns[:, -(ntrials - trial)] = self.R

            # use policy gradient update rule to adjust
            # probabilities!
            # first: make an array for the target policy distributions
            # for all time steps (those that contain 'R' in the slot
            # of the action that was actually taken!)
            if not self.train_for_action_map:
                self.target_distributions = np.zeros(
                    [self.batchsize, self.nsteps, self.num_actions])
                for t in range(self.nsteps):  # go through the trajectory again
                    a = self.actions[:,
                                     t]  # remember the action taken at step t
                    self.target_distributions[
                        self.allsamples, t,
                        a] = self.R  # reinforce that action!
                self.costs[trial] = self.Policy.train_on_batch(
                    np.reshape(
                        self.input_images,
                        [self.batchsize * self.nsteps, self.M, self.M, 3]),
                    np.reshape(
                        self.target_distributions,
                        [self.batchsize * self.nsteps, self.num_actions]))
            else:
                self.target_distributions = np.zeros([
                    self.batchsize, self.nsteps, self.M, self.M,
                    self.num_actions
                ])
                for t in range(self.nsteps):  # go through the trajectory again
                    a = self.actions[:,
                                     t]  # remember the action taken at step t
                    self.target_distributions[
                        self.allsamples, t, self.position[:, t, 0],
                        self.position[:, t,
                                      1], a] = self.R  # reinforce that action!
                self.costs[trial] = self.Policy.train_on_batch(
                    np.reshape(
                        self.input_images,
                        [self.batchsize * self.nsteps, self.M, self.M, 3]),
                    np.reshape(self.target_distributions, [
                        self.batchsize * self.nsteps, self.M, self.M,
                        self.num_actions
                    ]))

            # we needed the reshape to make sure the input is still
            # of shape [total_batchsize,M,M,3]
            # ...only now total_batchsize is larger!
            # this will run through the update algorithm a larger number
            # of samples in parallel, which again is very efficient!

            # visualize!
            if do_visualize and (trial % skipsteps == 0 or trial == ntrials - 1):
                self.visualize(do_show_test_batch)
                if do_plot_trajectories:
                    self.plot_trajectories()

    def visualize(self, do_show_test_batch=False):
        """
        Visualize the current training status:
        Cost function and either of the two:
        performance on a batch of test samples (if do_show_test_batch
        was True) or policy as a function of position (otherwise)
        """
        if do_show_test_batch:
            current_test_reward = np.copy(
                self.test_reward)  # avoid changes in the test_reward!
            R_test = self.run_one_trial(input_image=self.test_input_image,
                                        input_images=self.test_input_images,
                                        reward=current_test_reward,
                                        jx=self.test_jx,
                                        jy=self.test_jy,
                                        actions=self.test_actions,
                                        position=self.test_position,
                                        world=self.test_world,
                                        allsamples=self.test_allsamples,
                                        batchsize=self.test_batchsize)

        clear_output(wait=True)
        fig = plt.figure(constrained_layout=True,
                         figsize=(self.num_actions, 3))
        gs = fig.add_gridspec(ncols=self.num_actions, nrows=3)
        returns_plot = fig.add_subplot(gs[0:2, :])

        # show all the returns so far (averaged over the batch for each trial)
        returns_plot.plot(np.average(self.Returns, axis=0))
        # all the returns, in all trials (averaged over batch!)
        returns_plot.set_title("Return vs trial (" + str(self.batchsize) +
                               " trajs/trial)")

        if do_show_test_batch:
            n_test_plots = self.test_batchsize
        else:
            n_test_plots = self.num_actions

        test_plot = []
        for n in range(n_test_plots):
            test_plot.append(fig.add_subplot(gs[2, n]))

        if not do_show_test_batch:
            self.plot_try_pos(self.orig_test_world,
                              self.orig_test_reward,
                              ax=test_plot,
                              target=[self.chest_x, self.chest_y])
        else:
            # show what's happened in this test trial
            for n in range(self.test_batchsize):
                # draw the trajectory of test batch sample n
                picture = np.zeros([self.M, self.M,
                                    3])  # last index: red/green/blue
                picture[:, :, 0] = self.test_world[n, :, :]  # walls are red
                for j in range(self.nsteps):  # highlight trajectory
                    picture[self.test_position[n, j,
                                               0], self.test_position[n, j, 1],
                            1] = 0.5 * (1.0 + (1.0 * j) / self.nsteps)
                # put a bright pixel at the positions visited
                # highlight the target sites!
                picture[:, :, 2] += 1 * (self.test_reward[n, :, :] > 0)

                # show picture (transpose is needed because
                # otherwise the first coordinate jx is plotted upwards,
                # not to the right)
                test_plot[n].imshow(np.transpose(picture, [1, 0, 2]),
                                    origin='lower')
                test_plot[n].axis('off')
        plt.show()
        if self.extra_visualizer is not None:
            self.extra_visualizer(self)  # call some extra user-defined routine

    def plot_trajectories(self, width=5):
        world = self.world
        reward = self.reward
        position = self.position
        R = self.R
        M = self.M
        indices = np.argsort(R)[::-1]
        height = int(len(R) / width)
        if height * width < len(R):
            height += 1
        fig, ax = plt.subplots(ncols=width,
                               nrows=height,
                               figsize=[width, height])
        mx = 0
        my = 0
        for k in indices:
            ax[my,
               mx].imshow(np.transpose(world[k, :, :] + 0.6 * reward[k, :, :]),
                          origin='lower')
            #             ax[my,mx].plot(position[k,:,0],position[k,:,1],linewidth=3,color="orange")
            ax[my, mx].scatter(position[k, :, 0],
                               position[k, :, 1],
                               s=5,
                               zorder=10,
                               alpha=0.5)
            ax[my, mx].set_xlim(-1, M)
            ax[my, mx].set_ylim(-1, M)
            ax[my, mx].axis('off')
            ax[my, mx].set_title(str(int(R[k])), x=0.5, y=0.7)
            mx += 1
            if mx >= width:
                mx = 0
                my += 1
        plt.show()

    def try_pos(self, x, y, world, reward):
        try_image = np.zeros([1, self.M, self.M, 3])
        try_image[0, x, y, 2] = 1.0
        try_image[0, :, :, 0] = world[0, :, :]
        try_image[0, :, :, 1] = reward[0, :, :]
        if not self.train_for_action_map:
            return (self.Policy.predict_on_batch(try_image)[0])
        else:
            return (self.Policy.predict_on_batch(try_image)[0, x, y, :])

    def plot_try_pos(self, world, reward, ax=None, target=None):
        M = self.M
        P = np.zeros([M, M, self.num_actions])

        for x in range(M):
            for y in range(M):
                P[x, y, :] = self.try_pos(x, y, world, reward)

        if ax is None:
            fig, ax = plt.subplots(ncols=self.num_actions, nrows=1)
            ax_was_none = True
        else:
            ax_was_none = False

        for n in range(self.num_actions):
            ax[n].imshow(np.transpose(P[:, :, n] + world[0, :, :]),
                         origin='lower',
                         vmin=0.0,
                         vmax=1.0,
                         cmap=self.P_cmap)
            ax[n].axis('off')
            ax[n].set_title(str(self.directions[n]))
            w = np.where(reward[0, :, :] > 0.5)
            if len(w[0]) > 0:
                ax[n].scatter(w[0],
                              w[1],
                              c="orange",
                              s=5,
                              alpha=0.5,
                              linewidth=2)
#             if target is not None:
#                 ax[n].scatter([target[0]],[target[1]],c="orange")

        if np.any(np.abs(np.sum(P, axis=2) - 1.0) > 0.01):
            print("WARNING: Probabilities do not sum up to 1!",
                  np.sum(P, axis=2))
        if ax_was_none:
            plt.show()

    def refresh_returns(self):
        self.Returns = None
        self.costs = None  # will be re-initialized on next 'run'
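
# A minimal usage sketch of the class above, following its docstring
# ("mymaze = MazeRL(...)"); the parameter values here are only illustrative:
mymaze = MazeRL(M=7, batchsize=50, nsteps=15)
mymaze.run(ntrials=200, skipsteps=20)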
# Example #9
plot_img_pixels = 1
plot_img_cols = 5
plot_img_rows = 3

vals = np.linspace(-1, 1, M)
x, y = np.meshgrid(vals, vals)

y_test = np.zeros([1, M, M, 1])
y_test[:, :, :, 0] = my_generator(1, x, y)

y_in = np.zeros([batchsize, M, M, 1])

costs = np.zeros(steps)
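
# get_layer_activation_extractor is not defined in this excerpt. A minimal
# sketch of what it might look like: a Keras model returning the output of
# every layer of the given network, so intermediate feature maps can be shown
# (an assumption, not the original helper):
from tensorflow.keras.models import Model


def get_layer_activation_extractor(network):
    return Model(inputs=network.inputs,
                 outputs=[layer.output for layer in network.layers])
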
extractor = get_layer_activation_extractor(Net)

y_test_out = Net.predict_on_batch(y_test)
fig = plt.figure(constrained_layout=True, figsize=(8, 4))
gs = fig.add_gridspec(ncols=8, nrows=4)
filter_plot = fig.add_subplot(gs[0:3, 0:4])
test_in_plot = fig.add_subplot(gs[0:2, 4:6])
test_out_plot = fig.add_subplot(gs[2:4, 6:8])

test_in_plot.imshow(y_test[0, :, :, 0], origin='lower')
test_out_plot.imshow(y_test_out[0, :, :, 0], origin='lower')
test_in_plot.axis('off')
test_out_plot.axis('off')

if show_intermediate_layers:
    features = extractor(y_test)
    n1 = 0
    n2 = 0
# Example #10
def forecast(request):

    if "GET" == request.method:
        return render(request, 'forecast.html', {})
    else:
        excel_file = request.FILES["excel_file"]

        # reading the excel file
        wb = openpyxl.load_workbook(excel_file)
        worksheet = wb.active

        # converting worksheet to DataFrame
        df = pd.DataFrame(worksheet.values)
        month = list(df.drop(0, axis=0)[0])
        passeng = list(df.drop(0, axis=0)[1])
        Data = pd.DataFrame({'Month': month, 'Passengers': passeng})
        Data.Month = pd.to_datetime(Data.Month)

                
        n_input = 12
        n_features = 1

        # creating our model
        model = Sequential()
        model.add(LSTM(200, activation='relu', input_shape=(n_input, n_features)))
        model.add(Dropout(0.20))
        model.add(Dense(1))
        model.compile(optimizer='adam', loss='mse')

        # Scale 
        Data = Data.set_index('Month')
        train = Data
        scaler = MinMaxScaler()
        scaler.fit(train)
        train = scaler.transform(train)
        
        # Generating Time Series format
        generator = TimeseriesGenerator(train, train, length=n_input, batch_size=6)

        # Fitting our model
        model.fit_generator(generator, epochs=90, verbose=0)

        # Prediction: recursively forecast the next n_input months,
        # feeding each new prediction back into the input window
        pred_list = []
        batch = train[-n_input:].reshape((1, n_input, n_features))
        for i in range(n_input):
            pred_list.append(model.predict_on_batch(batch)[0])      
            batch = np.append(batch[:,1:,:],[[pred_list[i]]],axis=1)
        add_dates = [Data.index[-1] + DateOffset(months=x) for x in range(13)]
        future_dates = pd.DataFrame(index=add_dates[1:], columns=Data.columns)
        data_predict = pd.DataFrame(scaler.inverse_transform(pred_list), index = future_dates[-12:].index, columns= ['Predictions'])
        
        # Preparing for html page
        data_predictN = data_predict.reset_index()
        data_predictN['index']=data_predictN['index'].apply(lambda x: x.strftime("%m/%Y"))        
        dataF = data_predictN.values
        all_data = [{'Month': dataF[i][0], 'Passenger': dataF[i][1]} for i in range(len(dataF))]
        
        # Plotting the prediction
        df_proj = pd.concat([Data,data_predict], axis=1)
        plt.plot(df_proj.index, df_proj['Passengers'])
        plt.plot(df_proj.index, df_proj['Predictions'], color='r')
        plt.legend(loc='best', fontsize='xx-large')
        plt.xticks(fontsize=18, color = "white")
        plt.yticks(fontsize=16, color = "white")
        fig = plt.gcf()
        buf = io.BytesIO()
        fig.savefig(buf, format='png')
        buf.seek(0)
        string = base64.b64encode(buf.read())
        url = urllib.parse.quote(string)

        # Delete the model and clear the Keras session so that no state
        # leaks over into the next request
        del model
        gc.collect()
        K.clear_session()
        tf.compat.v1.reset_default_graph()

        return render(request, 'forecast.html', {"all_data":all_data, 'plot_div': url})
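
# A minimal sketch of how the forecast view above might be wired up in the
# app's urls.py (file and route names here are assumptions, not taken from
# the original project):
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('forecast/', views.forecast, name='forecast'),
#   ]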
Net=Sequential()
Net.add(PeriodicConvolution(kernel_size=3))

Net.compile(loss='mean_squared_error', optimizer='adam')


# In[4]:


y_in=np.array([[0.,0.,3.,0.,0.]])


# In[5]:


y_out=Net.predict_on_batch(y_in)
print(y_out)


# In[6]:


Net.layers[0].w


# In[7]:


Net.layers[0].w.assign(np.array([-1,0,1]))
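

# PeriodicConvolution (used above) is a custom layer that is not defined in
# this excerpt. A minimal sketch of what it might look like, judging from its
# use: a linear 1D convolution with periodic (circular) boundary conditions
# and a single trainable kernel vector w of length kernel_size, without bias.
# All details here are assumptions, not the original implementation.
import tensorflow as tf
from tensorflow.keras.layers import Layer


class PeriodicConvolution(Layer):
    def __init__(self, kernel_size=3, **kwargs):
        super().__init__(**kwargs)
        self.kernel_size = kernel_size

    def build(self, input_shape):
        # one trainable weight per kernel entry
        self.w = self.add_weight(name='w',
                                 shape=(self.kernel_size,),
                                 initializer='random_normal',
                                 trainable=True)

    def call(self, inputs):
        # wrap the input periodically, then convolve with w
        k = self.kernel_size // 2
        padded = tf.concat([inputs[:, -k:], inputs, inputs[:, :k]], axis=1)
        kernel = tf.reshape(self.w, [self.kernel_size, 1, 1])
        out = tf.nn.conv1d(padded[:, :, None], kernel,
                           stride=1, padding='VALID')
        return out[:, :, 0]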