Example #1
import os

from keras.callbacks import EarlyStopping  # assumed: these examples appear to use standalone Keras
from sklearn.metrics import f1_score

import tools  # project-local helpers: create_model, show_plot, write_file


def run(bs, path, lr, ks, num_layer):
    fold = 1
    # one iteration per pre-split cross-validation fold
    for X_train, Y_train, X_val, Y_val, val_cat in zip(training_data,
                                                       training_label,
                                                       validation_data,
                                                       validation_label,
                                                       validation_cate_label):
        print("Fold " + str(fold))
        model = tools.create_model(lr, bs, ks, num_layer)
        inner_path = path + "/fold_" + str(fold)
        if not os.path.exists(inner_path):
            os.makedirs(inner_path)

        early_stop = EarlyStopping(patience=20)
        history = model.fit(x=X_train,
                            y=Y_train,
                            epochs=80,
                            validation_data=(X_val, Y_val),
                            callbacks=[early_stop],
                            batch_size=bs,
                            verbose=0)
        evaluation = model.evaluate(x=X_val, y=Y_val)
        # predict_classes is the legacy Keras Sequential API (removed in recent tf.keras)
        validation_prediction = model.predict_classes(X_val, batch_size=bs)
        # per-class F1 computed against the fold's integer class labels
        score = f1_score(val_cat, validation_prediction, average=None)

        tools.show_plot(inner_path, history)
        tools.write_file(inner_path + "/readme.txt", evaluation, score, model)
        fold = fold + 1
        del model
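
run() zips over five parallel module-level lists, one entry per fold: the one-hot labels feed model.fit while the integer labels in validation_cate_label feed f1_score. A minimal, hypothetical sketch of how such lists could be built with scikit-learn's StratifiedKFold; the names X, y and n_classes are assumptions, not part of the original snippet.

from keras.utils import to_categorical
from sklearn.model_selection import StratifiedKFold

training_data, training_label = [], []
validation_data, validation_label, validation_cate_label = [], [], []
for train_idx, val_idx in StratifiedKFold(n_splits=5).split(X, y):
    training_data.append(X[train_idx])
    training_label.append(to_categorical(y[train_idx], n_classes))  # one-hot for model.fit
    validation_data.append(X[val_idx])
    validation_label.append(to_categorical(y[val_idx], n_classes))
    validation_cate_label.append(y[val_idx])  # integer labels for f1_score
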
    def dnn(self, path, name):
        # Fully connected regression network: an input-sized Dense layer,
        # seven ReLU hidden layers and a single linear output unit.
        model = Sequential()
        print(len(self.X_train))  # debug: number of training samples
        model.add(Dense(self.INPUT_DIM, input_shape=(self.INPUT_DIM, )))
        model.add(Activation('relu'))
        for i in range(7):
            model.add(Dense(self.N_HIDDEN))
            model.add(Activation('relu'))
        model.add(Dense(1))
        model.add(Activation('linear'))
        model.summary()
        # 'accuracy' is not meaningful for a regression (MSE) loss,
        # so mean absolute error is tracked instead.
        model.compile(loss='mse',
                      optimizer=self.OPTIMIZER,
                      metrics=['mae'])
        history = model.fit(self.X_train,
                            self.y_train,
                            epochs=self.NB_EPOCH,
                            verbose=self.VERBOSE)

        # Flatten the (n, 1) predictions to (n,) and score them with MAPE.
        y_predict = model.predict(self.X_test).reshape(-1)
        mape_error = mean_absolute_percentage_error(self.y_test, y_predict)

        # save_info(self.y_test, y_predict, name, mape_error, self.WINDOW_SIZE, path, self.bs, self.ts)
        show_plot(self.y_test, y_predict)
        return model
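
The mean_absolute_percentage_error helper called above is not shown. Below is a minimal NumPy sketch of one common definition (percentage form), assuming y_true contains no zeros; scikit-learn >= 0.24 also provides sklearn.metrics.mean_absolute_percentage_error, which returns a fraction rather than a percentage.

import numpy as np

def mean_absolute_percentage_error(y_true, y_pred):
    # assumed definition: mean of |error / true| expressed in percent
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    return np.mean(np.abs((y_true - y_pred) / y_true)) * 100.0
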
    def lstm(self, path, name):
        # Reshape the windowed features to (samples, timesteps=1, features),
        # the 3-D input layout the LSTM layer expects.
        trainX = np.reshape(self.X_train,
                            (self.X_train.shape[0], 1, self.X_train.shape[1]))
        testX = np.reshape(self.X_test,
                           (self.X_test.shape[0], 1, self.X_test.shape[1]))

        model = Sequential()
        # Stateful LSTM with a fixed batch size of 1: the cell state is carried
        # across batches and reset manually between epochs below.
        model.add(
            LSTM(32,
                 batch_input_shape=(1, trainX.shape[1], trainX.shape[2]),
                 stateful=True))
        # model.add(Activation('tanh'))
        model.add(Dense(1))
        # model.add(Activation('linear'))
        model.compile(loss='mean_squared_error', optimizer='adam')
        # model.summary()

        # Train one epoch at a time so the LSTM's internal state can be
        # reset manually between passes over the (unshuffled) series.
        for i in range(20):
            model.fit(trainX,
                      self.y_train,
                      epochs=1,
                      batch_size=1,
                      verbose=self.VERBOSE,
                      shuffle=False)
            model.reset_states()

        # batch_size must equal the batch dimension of batch_input_shape (1).
        y_predict = model.predict(testX, batch_size=1).reshape(-1)
        mape_error = mean_absolute_percentage_error(self.y_test, y_predict)

        # save_info(self.y_test, y_predict, name, mape_error, self.WINDOW_SIZE, path, self.bs, self.ts)
        show_plot(self.y_test, y_predict)
        return model
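
Both dnn and lstm read self.X_train / self.y_train, which appear to be sliding windows over a univariate series (the commented-out save_info call references self.WINDOW_SIZE). A hypothetical sketch of such windowing follows; series and window_size are assumed names and the original preprocessing is not shown.

import numpy as np

def make_windows(series, window_size):
    # each row of X holds window_size consecutive values; y is the value that follows
    X, y = [], []
    for i in range(len(series) - window_size):
        X.append(series[i:i + window_size])
        y.append(series[i + window_size])
    return np.array(X), np.array(y)
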
Example #4
def train():
    # Load MNIST, reshape to (N, H, W, 1), scale pixels to [0, 1] and
    # one-hot encode the labels.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # cv2.imshow('im',x_train[0])
    # cv2.waitKey(1000)
    x_train = x_train.reshape(-1, H, W, 1).astype('float') / 255
    x_test = x_test.reshape(-1, H, W, 1).astype('float') / 255
    y_train = keras.utils.to_categorical(y_train, n_classes)
    y_test = keras.utils.to_categorical(y_test, n_classes)

    model.summary()
    history = model.fit(x_train,
                        y_train,
                        validation_data=(x_test, y_test),
                        epochs=args.epoch,
                        batch_size=args.batch_size)
    tools.show_plot(history, './figure')
    # model.save('./model/mnistCNN.h5')
    scores = model.evaluate(x_test, y_test, batch_size=args.batch_size)
    print(scores)
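
train() relies on a compiled global model plus H, W, n_classes, args and a project-local tools module defined elsewhere in the script. A minimal sketch of what that surrounding context might look like for MNIST (28x28 greyscale, 10 classes); the architecture and argument defaults are assumptions, not the original definitions.

import argparse
import keras
from keras.datasets import mnist
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from keras.models import Sequential

H, W, n_classes = 28, 28, 10

parser = argparse.ArgumentParser()
parser.add_argument('--epoch', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=128)
args = parser.parse_args()

model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(H, W, 1)),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(128, activation='relu'),
    Dense(n_classes, activation='softmax'),
])
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
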
Example #5
        1.,
        pow(pm.N, 1. / 2.) * pow(log(pm.N), 1. / 4.) / pow(horizon, 1. / 4.))
    return eta, gamma


if __name__ == "__main__":
    Arms = np.array([0.75, 0.5, 0.5])
    horizon = 10000
    nbReps = 32
    nbCores = cpu_count() // 2  # integer division: use half of the available cores
    pm_game = games.BernoulliBandit(Arms)

    print("Bernoulli Bandit arms parameters:", Arms)
    print()
    pm_game.dump()
    tools.show_plot()
    print()

    tools.init_plot("FeedExp3 Partial Monitoring Regret on a " +
                    str(len(Arms)) + "-armed Bandit")

    ## Random policy

    print "== Random Baseline =="
    baseline = BasicPolicy(pm_game)
    cumRegrets = tools.eval_policy_parallel(nbCores, nbReps, horizon, pm_game,
                                            baseline)
    tools.plot_regret(cumRegrets, mylabel="Random", mycolor='red')

    ## Basic FeedExp3 policy