Example No. 1
0
    def EvaluateModel(self, val_x, val_y, batch_size):
        """
        Evaluate the trained model on held-out data.

        By evaluating the loss at unseen data we define a metric for the
        effectiveness of the training, and therefore the models effectiveness
        for classification.

        Accuracy is defined as:
            SUM(Wx+b && y_actual)/n_samples

        Parameters
        ----------
        val_x, val_y : validation inputs and labels.
        batch_size : int
            Number of samples evaluated per step.

        Returns
        -------
        list
            [mean_loss, mean_accuracy] over all full batches.
        """
        #        Define the graph operation for accuracy: fraction of
        #        predictions equal to the true labels.
        acc = tf.reduce_mean(
            tf.cast(tf.equal(self.GetPredictions(), self.y), tf.float32))

        accuracies = []
        losses = []

        #        Only full batches are evaluated; any remainder of val_y is
        #        dropped by the integer division.
        for i in range(val_y.shape[0] // batch_size):
            x, y = self._get_batches(val_x, val_y, batch_size)
            #            Fetch loss and accuracy in a single run so the
            #            forward pass is computed once per batch, not twice.
            batch_loss, batch_acc = self.sess.run(
                [self.loss, acc],
                feed_dict={
                    self.input_: x,
                    self.labels_: y
                })
            losses.append(batch_loss)
            accuracies.append(batch_acc)

        #        Return mean loss and mean accuracy on evaluation data
        return [np.mean(losses), np.mean(accuracies)]
Example No. 2
0
    def train(self, X_train, noisy=False):
        """
        Train the regularized parametric t-SNE network.

        When `noisy` is set, Gaussian noise is added to the inputs while the
        clean copy is kept as the reconstruction target (denoising style).
        Returns the list of per-epoch mean losses.
        """
        print('Start training the neural network...')
        X_train = X_train.copy()
        # Targets alias the clean copy; noise below rebinds X_train to a new
        # array, so y_train stays uncorrupted.
        y_train = X_train
        if noisy:
            noise = np.random.normal(0, 0.01**0.5, (X_train.shape))
            X_train = X_train + noise
        begin = time()
        losses = []

        n_sample, n_feature = X_train.shape
        nBatches = int(n_sample / self.batch_size)
        for epoch in range(self.epochs):
            # Fresh shuffle each epoch so batches differ between epochs.
            order = np.random.permutation(n_sample)
            shuffled_x = X_train[order]
            shuffled_y = y_train[order]
            epoch_loss = 0

            for b in range(nBatches):
                lo = b * self.batch_size
                hi = lo + self.batch_size
                batch_y = shuffled_y[lo:hi]
                batch = shuffled_x[lo:hi]
                if self.theta > 0:  # runs faster this way
                    # cond_probs is chatty; silence it while computing P.
                    blockPrint()
                    cond_p, _ = cond_probs(batch.copy(),
                                           perplexity=self.perplexity)
                    P = joint_average_P(cond_p)
                    enablePrint()
                    if self.theta == 1:
                        # Pure parametric t-SNE: embedding loss only.
                        all_losses = self.encoder.train_on_batch(
                            x=batch, y={'encoded': P})
                    else:
                        # Regularized: embedding plus reconstruction losses.
                        all_losses = self.model.train_on_batch(
                            x=batch, y={'encoded': P, 'decoded': batch_y})
                else:
                    # theta == 0: plain autoencoder with mse loss.
                    all_losses = self.model.train_on_batch(
                        x=batch, y={'decoded': batch_y})

                epoch_loss += np.array(all_losses)

            losses.append(epoch_loss / nBatches)
            print('Epoch: %.d elapsed time: %.2f losses: %s ' %
                  (epoch + 1, time() - begin, losses[epoch]))

        return losses
Example No. 3
0
def fit_n(n, model_generator, loss, x, y):
    """ Fits n models and returns the one with the lowest cost. """

    n_inputs = x.shape[1]
    # Build all candidate models up front with the same loss and input width.
    models = [model_generator(loss, n_inputs) for _ in range(n)]

    # Train each candidate and keep its final (last-epoch) training loss.
    losses = [
        model.fit(x, y, batch_size=32, epochs=20000, verbose=1).history["loss"][-1]
        for model in models
    ]

    print(losses)
    # The winner is the model whose final loss is smallest (first on ties).
    return models[np.argmin(losses)]
Example No. 4
0
    def train(self):
        """Run one training pass and one validation pass over the reader's
        batch generators, appending the mean of each to self.losses
        ('loss' / 'val_loss'); -1.0 is recorded when a generator is empty."""
        print("Train:")
        train_losses = []
        for el in self.reader.batch_generator_train():
            current = self.obj_model.train_on_batch(x=el[0], y=el[1])
            train_losses.append(current)
            print("loss:", current)
        mean_train = np.mean(np.asarray(train_losses).astype(float)) if train_losses else -1.0
        self.losses["loss"].append(mean_train)

        print("Validation:")
        valid_losses = []
        for el in self.reader.batch_generator_train("valid"):
            current = self.obj_model.test_on_batch(x=el[0], y=el[1])
            valid_losses.append(current)
            print("val_loss:", current)
        mean_valid = np.mean(np.asarray(valid_losses).astype(float)) if valid_losses else -1.0
        self.losses["val_loss"].append(mean_valid)
def min_loss(fn, epochs, batch_shape):
    """Minimise `fn` with repeated L-BFGS-B rounds and return the result as
    an image.

    Starts from Gaussian noise, runs `epochs` rounds of at most 20 function
    evaluations each (warm-started from the previous solution), plots the
    loss history, then reshapes and un-preprocesses the optimised vector.
    Returns the first element of the un-preprocessed batch.
    """
    start_time = datetime.now()
    history = []
    # Flat random starting point with as many entries as the target image.
    x = np.random.randn(np.prod(batch_shape))
    for step in range(epochs):
        # Each round continues from the previous x (warm restart).
        x, current_loss, _ = scipy.optimize.fmin_l_bfgs_b(func=fn, x0=x, maxfun=20)
        print("iter=%s, loss=%s" % (step, current_loss))
        history.append(current_loss)
    print("duration:", datetime.now() - start_time)
    plt.plot(history)
    plt.show()

    # Back to image shape, undo preprocessing, drop the batch dimension.
    newimg = x.reshape(*batch_shape)
    final_img = unpreprocess(newimg)
    return final_img[0]
def evaluate_LSTM_model(X_train, y_train, n_neurons1, n_neurons2):
    """Build, briefly train and score a 2-layer stateful LSTM regressor.

    Parameters
    ----------
    X_train : 3-D array (samples, timesteps, features) of training inputs.
    y_train : 2-D array of training targets.
    n_neurons1, n_neurons2 : units in the first / second LSTM layer.

    Returns
    -------
    float
        Mean squared error on the test set.

    NOTE(review): relies on module-level globals `n_batch`, `X_test` and
    `y_test` being defined before this is called — confirm at the call site.
    """
    model = Sequential()
    # First LSTM returns full sequences so it can feed the second LSTM.
    # dropout: fraction of the units to drop for the linear transformation
    # of the inputs.
    model.add(LSTM(n_neurons1,
                   batch_input_shape=(n_batch, X_train.shape[1], X_train.shape[2]),
                   activation="relu", return_sequences=True, stateful=True,
                   dropout=0.2))
    model.add(LSTM(n_neurons2,
                   batch_input_shape=(n_batch, X_train.shape[1], X_train.shape[2]),
                   stateful=True))
    model.add(Dense(y_train.shape[1]))
    model.compile(loss='mean_squared_error', optimizer="adam")

    # Stateful LSTMs require shuffle=False so batch order is preserved.
    # (The history/losses bookkeeping of the original was dead code — the
    # collected values were never read — so it has been removed.)
    model.fit(X_train, y_train, epochs=2, verbose=2, batch_size=n_batch,
              shuffle=False)

    # Score on the held-out set.
    forecast = model.predict(X_test, batch_size=n_batch)
    predicted = np.reshape(forecast, (forecast.size,))
    actual = np.reshape(y_test, (y_test.size,))
    error = mean_squared_error(actual, predicted)

    # Clear accumulated LSTM state so repeated evaluations start fresh.
    model.reset_states()
    return error
Example No. 7
0
def get_losses(ground_truth, outputs):
    """Wrap the five keypoint-detection loss functions as Lambda layers.

    Returns the losses as a list in the order: keypoint map, short offset,
    mid offset, segmentation, long offset.
    """
    (kp_maps_true, short_offset_true, mid_offset_true, long_offset_true,
     seg_true, crowd_mask, unannotated_mask, overlap_mask) = ground_truth
    kp_maps, short_offsets, mid_offsets, long_offsets, seg_mask = outputs

    # Each loss is a Lambda layer applied to the tensors it needs.
    kp_loss = KL.Lambda(kp_map_loss)(
        [kp_maps_true, kp_maps, unannotated_mask, crowd_mask])
    short_loss = KL.Lambda(short_offset_loss)(
        [short_offset_true, short_offsets, kp_maps_true])
    mid_loss = KL.Lambda(mid_offset_loss)(
        [mid_offset_true, mid_offsets, kp_maps_true])
    seg_loss = KL.Lambda(segmentation_loss)(
        [seg_true, seg_mask, crowd_mask])
    long_loss = KL.Lambda(long_offset_loss)(
        [long_offset_true, long_offsets, seg_true, crowd_mask,
         unannotated_mask, overlap_mask])

    return [kp_loss, short_loss, mid_loss, seg_loss, long_loss]
Example No. 8
0
def evaluate_loss_bad(model, excerpt, char_to_ix, vocab_size):
    """Score a character-level model on an excerpt, one prediction at a time.

    Feeds the excerpt prefix-by-prefix, looks up the predicted probability of
    the actual next character, and accumulates the negative log-likelihood.

    Parameters
    ----------
    model : object with a Keras-style predict(X, batch_size=...) method.
    excerpt : string of characters to score.
    char_to_ix : mapping from character to one-hot index.
    vocab_size : size of the one-hot dimension.

    Returns
    -------
    list of (char, probability, loss) tuples, one per predicted character.

    NOTE(review): reads the module-level global `fuzz_factor`.
    """
    X = np.zeros((1, len(excerpt), vocab_size))
    losses = []
    cum_loss = 0.
    ix = char_to_ix[excerpt[0]]
    X[0, 0, :][ix] = 1
    for i in range(1, len(excerpt)):
        char = excerpt[i]
        # Probability distribution over the next character given the prefix.
        weights = model.predict(X[:, :i + 1, :], batch_size=1)[0][-1]
        ix = char_to_ix[char]
        # Clamp probabilities away from 0 and 1 so the log stays finite.
        # BUG FIX: the original loop rebound its loop variable and never
        # modified `weights`, so -np.log(0) could still produce inf.
        weights = np.clip(weights, fuzz_factor, 1. - fuzz_factor)
        loss = -np.log(weights[ix])
        losses.append((char, weights[ix], loss))
        cum_loss += loss
        print('Char: ', char, ' Loss: ', loss, '  Loss so far: ', cum_loss / i)
        X[0, i, :][ix] = 1
    return losses
Example No. 9
0
def RewardStretchedEnsemble(trainingIteration,fit=True):
    """Train a reward-weighted ensemble over the base forecasting models and
    walk it forward through 62 test dates.

    Parameters
    ----------
    trainingIteration : identifier used to select per-model weight files.
    fit : when True, fit the ensemble network; otherwise load the saved
        'ensemble.h5' weights.

    Returns
    -------
    (preds, actual) : lists of reconstructed predictions and true values.

    NOTE(review): depends on several module-level names (dataSetup,
    Ensemble_Network, addRewardData, multivariate_models,
    singlevariate_models, validation_size, time_steps_back, pywt, metrics)
    defined elsewhere in this file.
    """
    # Multivariate split for the models that consume multivariate inputs.
    X,Y,testData,testValue,scalers,cA_list,cD_list=dataSetup(multivariate=True,time_steps_back=time_steps_back,printDates=True)
    trainX,valX,trainY,valY=X[:-validation_size],X[-validation_size:],Y[:-validation_size],Y[-validation_size:]

    # Single-variate split for the remaining models; note this overwrites
    # scalers/cA_list/cD_list with the single-variate versions.
    X,Y,testData_single,testValue,scalers,cA_list,cD_list=dataSetup(time_steps_back=time_steps_back)
    trainX_single,valX_single,trainY_single,valY_single=X[:-validation_size],X[-validation_size:],Y[:-validation_size],Y[-validation_size:]

    # gamma sharpens ("stretches") the reward so accurate models dominate.
    gamma=20
    num_models=len(multivariate_models)+len(singlevariate_models)
    
    ensembleModel=Ensemble_Network(32,'mse',num_models)

    ensemble_X,ensemble_Y=addRewardData(trainingIteration)
    print("Training ensemble...")
    if(fit):
        ensembleModel.fit(ensemble_X, ensemble_Y, epochs=25, batch_size=10, verbose=0)
    else:
        ensembleModel.load_weights('ensemble.h5')
    ensembleModel.save_weights('ensemble.h5')

    actual=[]
    preds=[]
    # Walk forward one test date at a time.
    for i in range(1,63):
        actual.append(testValue)

        # Replay a small random sample of the reward data each step.
        idx = np.random.choice(np.arange(len(ensemble_Y)), 20, replace=False)
    
        x_sample = ensemble_X[idx]
        y_sample = ensemble_Y[idx]
        
        ensembleModel.fit(x_sample, y_sample, epochs=5, batch_size=5, verbose=0)

        # Pick the base model the ensemble currently rates highest.
        ensemble_prediction=ensembleModel.predict(testData)
        print(ensemble_prediction)
        best=np.argmax(ensemble_prediction)

        ensemble_X=np.concatenate((ensemble_X, testData))


        addLine=[]
        losses=[]
        j=0

        # Keep the previous date's wavelet detail and scaler: the chosen
        # prediction below is reconstructed against the data it was made from.
        cD_list_old=cD_list
        scalers_old=scalers

        # Advance both datasets to test date i (the second call again
        # overwrites scalers/cA_list/cD_list with single-variate versions).
        X,Y,testData,testValue,scalers,cA_list,cD_list=dataSetup(multivariate=True,testDate=i,time_steps_back=time_steps_back)
        X,Y,testData_single,testValue,scalers,cA_list,cD_list=dataSetup(time_steps_back=time_steps_back,testDate=i)
        
        # Score every multivariate model; reward is the stretched inverse of
        # its absolute error. j tracks the flat model index to match `best`.
        for key, value in multivariate_models.items():
            value.load_weights("weights/"+key+"_"+str(trainingIteration)+'.h5')
            pred=value.predict(testData)
            if(j==best):
                ensemblePred=pred
            loss=abs(pred-Y[-1])
            losses.append(loss)
            reward=1/(1+abs(pred-Y[-1]))
            reward=reward**gamma
            addLine.append(reward)
            j+=1


        # Same scoring for the single-variate models.
        # NOTE(review): this weight path omits trainingIteration (compare the
        # multivariate branch above) — confirm the filename is intentional.
        for key, value in singlevariate_models.items():
            value.load_weights("weights/"+key+"_"+'.h5')
            pred=value.predict(testData_single)
            if(j==best):
                ensemblePred=pred
            loss=abs(pred-Y[-1])
            losses.append(loss)
            reward=1/(1+abs(pred-Y[-1]))
            reward=reward**gamma
            addLine.append(reward)
            j+=1

        # Undo scaling and the Haar wavelet transform to recover a prediction
        # in the original units; keep its last reconstructed value.
        y=scalers_old.inverse_transform(ensemblePred)
        reconstruct=pywt.idwt(np.reshape(y, (y.shape[0])), cD_list_old[-1], 'haar')
        preds.append(reconstruct[-1])

        addLine=np.reshape(addLine, (1,num_models))

        # Append this date's rewards as a new training row for the ensemble.
        ensemble_Y=np.concatenate((ensemble_Y, addLine))            

    mae=metrics.mean_absolute_error(np.array(preds), np.array(actual))

    print("Training iteration MAE")
    print(mae)
    return preds, actual
# Display the train-vs-validation loss curves gathered by the code above.
plt.title('model train vs validation loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper right')
plt.show()

###################################################
# Epoch-by-epoch training loop: records the full loss history and tracks the
# best (lowest) validation loss together with the epoch it occurred at.
verbose=1
losses = []
val_losses = []
# (best val_loss so far, epoch index) — seeded with large sentinel values.
min_val_loss = (99999,999999)
for i in range(training_epochs):
    if verbose!=0:
        print(i)
    history = model.fit(X_train, y_train, validation_data=(X_val,y_val), epochs=2, batch_size=n_batch, verbose=2, shuffle=False)
    # NOTE(review): 'loss' is appended as the whole per-fit list while
    # 'val_loss' keeps only its first entry — confirm this asymmetry is
    # intended before relying on the plotted curves.
    losses.append(history.history['loss'])
    val_losses.append(history.history['val_loss'][0])
    if val_losses[-1] < min_val_loss[0]:
        min_val_loss = (val_losses[-1], i)
#    model.reset_states()
print('best val_loss and epoch:',min_val_loss)
plt.title('loss')
plt.plot(losses,color="blue")
plt.plot(val_losses, color='red')
plt.show()


# forecasting
# Predict on the test set and map the scaled output back to original units.
pred_y = model.predict(X_test,batch_size=n_batch)
inv_yhat = scalery.inverse_transform(pred_y)
real_test=scale_Y[-n_test:]
Example No. 11
0
def train_model(env):
    """Train a DQN agent on `env` with experience replay and a target network.

    Uses epsilon-greedy action selection with a decaying epsilon, a replay
    memory sampled in minibatches, and a target network refreshed every C
    steps. Episode losses and Q-value estimates are appended to timestamped
    .out files.
    """
    TIME_STEPS = int(1e6)
    STACK_SIZE = 4
    # Keep a few extra frames so a full stack can always be assembled.
    memories = deque(maxlen=int(1e6) + STACK_SIZE - 1)
    NUM_ACTIONS = env.action_space.n
    GAMMA = 0.99  # discount factor
    EPSILON_DECAY_PERIOD = int(5e5)
    MIN_EPSILON = 0.1
    C = 1000  # target-network sync interval (steps)
    BATCH_SIZE = 32
    losses = []
    TRAINING_FRAMES = int(10e6)
    frames_played = 0

    q_model = init_model(NUM_ACTIONS)
    target_q_model = init_model(NUM_ACTIONS)
    curr_time = time.time()
    main_prefix = "main-{}".format(curr_time)
    f_main_out = open("{}.out".format(main_prefix), "w+", buffering=1)
    q_prefix = "q_vals-{}".format(curr_time)
    q_vals = []
    f_q_out = open("{}.out".format(q_prefix), "w+")

    for i_frame in range(TRAINING_FRAMES):
        # Start a fresh episode with a couple of random warm-up steps.
        env.reset()
        action = env.action_space.sample()
        prev, _, _, _ = env.step(action)
        action = env.action_space.sample()
        observation, reward, terminal, info = env.step(action)
        s_t = preprocess_frame(observation, prev)
        # Seed the replay memory with STACK_SIZE random transitions.
        for i in range(STACK_SIZE):
            action = env.action_space.sample()
            observation, reward, terminal, info = env.step(action)
            s_t_next = preprocess_frame(observation, prev)
            mem = (s_t, action, reward, s_t_next, terminal)
            memories.append(mem)
            s_t = s_t_next
            prev = observation
        phi_t = preprocess_memory(memories, 0, STACK_SIZE)[0]
        loss = 0
        i_episode = 0
        for t in range(TIME_STEPS):
            frames_played += 1
            epsilon = calculate_epsilon(EPSILON_DECAY_PERIOD, frames_played,
                                        MIN_EPSILON)
            # With some probability epsilon select random a.
            if np.random.uniform() <= epsilon:
                action = env.action_space.sample()
            else:
                # NOTE(review): phi_t is never advanced to the next stacked
                # state inside this loop (compare the sibling implementation
                # below, which updates phi_t each step) — confirm intended.
                preds = q_model.predict(np.expand_dims(np.array(phi_t),
                                                       axis=0))
                action = np.argmax(preds)
                q_vals.append(np.max(preds))
                f_q_out.write("{}\n".format(np.max(preds)))

            observation, reward, terminal, info = env.step(action)
            s_t_next = preprocess_frame(observation, prev)
            prev = observation
            mem = (s_t, action, reward, s_t_next, terminal)
            memories.append(mem)
            s_t = s_t_next
            # Sample a minibatch of stack-aligned memories.
            num_memories = len(memories) - STACK_SIZE
            idxs = random.sample(range(num_memories),
                                 min(BATCH_SIZE, num_memories))
            mem_batch = []
            for i in idxs:
                mem_batch.append(preprocess_memory(memories, i, STACK_SIZE))
            m_phi, m_action, m_reward, m_phi_next, m_terminal = zip(*mem_batch)
            # Bellman targets: r for terminal transitions, otherwise
            # r + gamma * max Q'(s', a') from the target network.
            q_s_next = target_q_model.predict(np.array(m_phi_next))
            y = q_model.predict(np.array(m_phi))
            for i, (mt, mr,
                    ma) in enumerate(zip(m_terminal, m_reward, m_action)):
                if mt:
                    y[i][ma] = mr
                else:
                    y[i][ma] = mr + GAMMA * np.max(q_s_next[i])

            l = q_model.train_on_batch(np.array(m_phi), y)
            loss += l
            if t % C == 0:
                # Periodically sync and checkpoint the target network.
                target_q_model.set_weights(q_model.get_weights())
                target_q_model.save('model.h5')
            if terminal:
                losses.append(loss)
                loss_str = "Loss after {}th episode: {}".format(
                    i_episode, loss)
                i_episode += 1
                episode_str = "Episode finished after {} timesteps".format(t +
                                                                           1)
                print(loss_str)
                print(episode_str)
                # Save progress.
                f_main_out.write("{}\n{}\n".format(loss_str, episode_str))
                target_q_model.set_weights(q_model.get_weights())
                target_q_model.save('model.h5')
                # BUG FIX: this break previously sat outside the `if terminal`
                # block, ending the time-step loop after a single step of
                # every episode (compare the sibling implementation below).
                break
    env.close()
    f_main_out.close()
    f_q_out.close()
Example No. 12
0
def train_model(env):
    """Train a DQN agent on `env` using experience replay, an epsilon-greedy
    policy with decaying epsilon, and a periodically synced target network.

    Writes per-episode loss summaries to a timestamped .out file and saves a
    loss curve to a matching .png when training finishes.

    NOTE(review): depends on module-level helpers init_model,
    preprocess_frame and calculate_epsilon defined elsewhere in this file.
    """
    NUM_EPISODES = int(4e5)
    TIME_STEPS = int(1e6)
    memories = deque(maxlen=int(1e6))  # replay buffer of transitions
    NUM_ACTIONS = env.action_space.n
    GAMMA = 0.99  # discount factor
    EPSILON_DECAY_PERIOD = int(2e5)
    MIN_EPSILON = 0.1
    C = 1000  # target-network sync interval (steps)
    BATCH_SIZE = 32
    SEQ_LEN = 4  # number of frames stacked into one state
    f_recent = deque(maxlen=SEQ_LEN)  # rolling window of recent frames
    losses = []
    MAX_FRAMES = int(10e6)
    frames_played = 0
    
    q_model = init_model()
    target_q_model = init_model()
    file_prefix = "car-{}".format(time.time())
    out_file = open("{}.out".format(file_prefix), "w+", buffering=1)

    for i_episode in range(NUM_EPISODES):
        # Stop once the global frame budget is exhausted.
        if frames_played >= MAX_FRAMES:
            target_q_model.set_weights(q_model.get_weights())
            target_q_model.save('model.h5')
            break
        env.reset()
        # Warm up the frame stack with SEQ_LEN random steps.
        action = env.action_space.sample()
        prev, _, _, _ = env.step(action)
        for i in range(SEQ_LEN):
            action = env.action_space.sample()
            observation, reward, terminal, info = env.step(action)
            s_t = preprocess_frame(observation, prev)
            f_recent.append(s_t)
            prev = observation
        phi_t = np.stack(np.array(f_recent), axis=-1)
        loss = 0
        for t in range(TIME_STEPS):
            frames_played += 1
            epsilon = calculate_epsilon(EPSILON_DECAY_PERIOD, frames_played, MIN_EPSILON)
            print("t:", t)
            # With some probability epsilon select random a. 
            if np.random.uniform() <= epsilon:
                action = env.action_space.sample()
            else:
                action = np.argmax(q_model.predict(np.expand_dims(np.array(phi_t), axis=0)))
            observation, reward, terminal, info = env.step(action)
            s_t_next = preprocess_frame(observation, prev)
            prev = observation
            f_recent.append(s_t_next)
            phi_t_next = np.stack(np.array(f_recent), axis=-1)
            # Store the transition; the getsizeof prints below look like
            # debug instrumentation for memory usage.
            mem = (phi_t, action, reward, phi_t_next, terminal)
            size = sys.getsizeof(mem)
            print(phi_t.shape)
            for obj in mem:
                print("i:", sys.getsizeof(obj))
                size += sys.getsizeof(obj)
            print("size:", size)
            memories.append(mem)
            s_t = s_t_next
            phi_t = phi_t_next
            # Minibatch update: Bellman targets computed from the target
            # network, then one gradient step on the online network.
            mem_batch = random.sample(memories, min(BATCH_SIZE, len(memories)))
            m_phi, m_action, m_reward, m_phi_next, m_terminal = zip(*mem_batch)
            y = q_model.predict(np.array(m_phi))
            q_s_next = target_q_model.predict(np.array(m_phi_next))
            for i, (mt, mr, ma) in enumerate(zip(m_terminal, m_reward, m_action)):
                if mt:
                    y[i][ma] = mr
                else:
                    y[i][ma] = mr + GAMMA * np.max(q_s_next[i])

            l = q_model.train_on_batch(np.array(m_phi), y)
            loss += l
            if t % C == 0:
                # Periodically sync and checkpoint the target network.
                print("Adjusting target network")
                target_q_model.set_weights(q_model.get_weights())
                target_q_model.save('car_dqn_model.h5')
            print("epsilon: {}".format(epsilon))
            print("frames played: {}".format(frames_played))
            print("reward: {}".format(reward))
            print("terminal: {}".format(terminal))
            print("info: {}".format(info))
            print("---------------------")
            if terminal:
                # Episode over: record the accumulated loss and checkpoint.
                losses.append(loss)
                loss_str = "Loss after {}th episode: {}".format(i_episode, loss)
                episode_str = "Episode finished after {} timesteps".format(t+1)
                print(loss_str)
                print(episode_str)
                out_file.write("{}\n{}\n".format(loss_str, episode_str))
                target_q_model.set_weights(q_model.get_weights())
                target_q_model.save('model.h5')
                break
    env.close()
    out_file.close()

    # Plot the per-episode loss history collected during training.
    plt.plot(list(range(len(losses))), losses, marker="o")
    plt.savefig("{}.png".format(file_prefix))