Example #1
    def initialize(self,
                   mlsl,
                   nnl,
                   seed=None,
                   weight_range=1.0,
                   outputs_from_mlsl=None,
                   use_softmax=True):
        """
        Initialize an object of this class that binds a new NN on top
        of an existing MLSL object
        :param mlsl:
        :type mlsl: MLSL
        :param nnl:
        :type nnl: list
        :param seed:
        :type seed:
        :param weight_range:
        :type weight_range:
        :return:
        :rtype:
        """
        # use the MLSL's final output size when its outputs are fed to the NN
        self.mlsl_output_size = mlsl.output_sizes[-1] if outputs_from_mlsl else 0

        # Change input size of Neural net to assigned feature size plus MLSL outputs
        nnl[0] += self.mlsl_output_size

        self.outputs_from_mlsl = outputs_from_mlsl

        self.mlsl = mlsl
        self.nnet = DNN()
        self.nnet.initialize(nnl=nnl, seed=seed, weight_range=weight_range)
        self.use_softmax = use_softmax
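The key step above is widening the NN's first layer by the size of the MLSL outputs that get appended to the raw features. A minimal sketch of that arithmetic with hypothetical sizes (not from the original project):

# Hypothetical layer sizes illustrating the widening step above.
mlsl_output_size = 64        # assumed size of the MLSL's final output
nnl = [100, 50, 10]          # assumed NN layer sizes: input, hidden, output
nnl[0] += mlsl_output_size   # the NN now expects raw features plus MLSL outputs
assert nnl == [164, 50, 10]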
Example #2
def evaluate_mlp_ner():
    cws = DNN('mlp', mode=TrainMode.Sentence, is_seg=True, task='ner')
    model = 'tmp/mlp/mlp-ner-model1.ckpt'
    # print(cws.seg('在中国致公党第十一次全国代表大会隆重召开之际,中国共产党中央委员会谨向大会表示热烈的祝贺,向致公党的同志们', model,ner=True))
    print(cws.seg('多饮多尿多食', model, ner=True))
    print(cws.seg('无明显小便泡沫增多,伴有夜尿3次。', model, ner=True))
    print(cws.seg('无明显双脚疼痛,无间歇性后跛行,无明显足部红肿破溃', model, ner=True, debug=False))
Example #3
def get_ner(content, model_name):
    if model_name.startswith('tmp/mlp'):
        dnn = DNN('mlp', mode=TrainMode.Sentence, task='ner', is_seg=True)
    else:
        dnn = DNN('lstm', task='ner', is_seg=True)
    ner = dnn.seg(content, model_path=model_name, ner=True, trans=True)
    return ner[1]
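A hypothetical call, reusing the model path from Example #2 (not taken from the original source):

entities = get_ner('无明显小便泡沫增多,伴有夜尿3次。', 'tmp/mlp/mlp-ner-model1.ckpt')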
Example #4
  def __init__(self, num_actions, observation_shape, dqn_params, cnn_params, folder):
    self.num_actions = num_actions
    self.observation_shape = observation_shape
    self.cnn_params = cnn_params
    self.folder = folder
    self.epsilon = dqn_params['epsilon']
    self.gamma = dqn_params['gamma']
    self.mini_batch_size = dqn_params['mini_batch_size']
    self.time_step = 0
    self.decay_rate = dqn_params['decay_rate']
    self.epsilon_min = dqn_params['epsilon_min']
    self.current_epsilon = self.epsilon

    self.use_ddqn = dqn_params['use_ddqn']
    self.print_obs = dqn_params['print_obs']
    self.print_reward = dqn_params['print_reward']

    self.startTraining = False

    #memory for printing reward and observations  
    self.memory = deque(maxlen=1000)

    #PER memory
    self.per_memory = Memory(dqn_params['memory_capacity'])

    #initialize network
    self.model = DNN(folder, num_actions, observation_shape, cnn_params)
    print("model initialized")

    #extra network for Double DQN
    if self.use_ddqn == 1:
        self.target_model = CNN(folder, num_actions, observation_shape, cnn_params)
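The constructor stores decay_rate and epsilon_min, but the snippet does not show the update rule. A plausible exponential-decay sketch, offered as an assumption rather than the project's actual code:

def decayed_epsilon(epsilon, decay_rate, epsilon_min, time_step):
    # anneal epsilon toward its floor as training progresses
    return max(epsilon_min, epsilon * (decay_rate ** time_step))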
Example #5
def main():
    x_train, y_train, x_test, y_test = data.mnist(one_hot=True)

    # Define Deep Neural Network structure (input_dim, num_of_nodes)
    layers = [[x_train.shape[1], 256], [256, 128], [128, 64]]

    # Initialize a deep neural network

    dnn = DNN(MODEL_FOLDER, os_slash, layers, params)

    pre_epochs = 100
    train_epochs = 100

    # Create auto-encoders and train them one by one by stacking them in the DNN
    pre_trained_weights = dnn.pre_train(x_train, pre_epochs)

    # Then use the pre-trained weights of these layers as initial weight values for the MLP
    history = dnn.train(x_train,
                        y_train,
                        train_epochs,
                        init_weights=pre_trained_weights)

    plot.plot_loss(history, loss_type='MSE')

    predicted, score = dnn.test(x_test, y_test)

    print("Test accuracy: ", score[1])

    dnn.model.save_weights(MODEL_FOLDER + os_slash + "final_weights.h5")
    dnn.model.save(MODEL_FOLDER + os_slash + "model.h5")
    save_results(score[1])
Example #6
    def __init__(self,
                 numpy_rng,
                 theano_rng=None,
                 cfg=None,
                 cfg_tower1=None,
                 cfg_tower2=None):

        self.layers = []
        self.params = []
        self.delta_params = []

        self.cfg = cfg
        self.cfg_tower1 = cfg_tower1
        self.cfg_tower2 = cfg_tower2

        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2**30))
        # allocate symbolic variables for the data
        self.x = T.matrix('x')
        self.y = T.ivector('y')

        self.input_tower1 = self.x[:, 0:cfg_tower1.n_ins]
        self.input_tower2 = self.x[:, cfg_tower1.n_ins:(cfg_tower1.n_ins +
                                                        cfg_tower2.n_ins)]

        self.dnn_tower1 = DNN(numpy_rng=numpy_rng,
                              theano_rng=theano_rng,
                              cfg=self.cfg_tower1,
                              input=self.input_tower1)
        self.dnn_tower2 = DNN(numpy_rng=numpy_rng,
                              theano_rng=theano_rng,
                              cfg=self.cfg_tower2,
                              input=self.input_tower2)
        concat_output = T.concatenate(
            [self.dnn_tower1.layers[-1].output,
             self.dnn_tower2.layers[-1].output],
            axis=1)
        self.dnn = DNN(numpy_rng=numpy_rng,
                       theano_rng=theano_rng,
                       cfg=self.cfg,
                       input=concat_output)

        self.layers.extend(self.dnn_tower1.layers)
        self.params.extend(self.dnn_tower1.params)
        self.delta_params.extend(self.dnn_tower1.delta_params)
        self.layers.extend(self.dnn_tower2.layers)
        self.params.extend(self.dnn_tower2.params)
        self.delta_params.extend(self.dnn_tower2.delta_params)
        self.layers.extend(self.dnn.layers)
        self.params.extend(self.dnn.params)
        self.delta_params.extend(self.dnn.delta_params)

        self.finetune_cost = self.dnn.logLayer.negative_log_likelihood(self.y)
        self.errors = self.dnn.logLayer.errors(self.y)
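The two-tower split above slices one input matrix column-wise. A NumPy illustration of the same slicing with hypothetical dimensions (the original operates on Theano symbols):

import numpy as np

n_ins1, n_ins2 = 40, 20                    # assumed tower input sizes
x = np.random.randn(8, n_ins1 + n_ins2)    # batch of 8 concatenated feature rows
tower1_in = x[:, 0:n_ins1]
tower2_in = x[:, n_ins1:n_ins1 + n_ins2]
assert tower1_in.shape == (8, 40) and tower2_in.shape == (8, 20)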
Example #7
    def runDNNTrain(self, corpus=None, learningRate=0.02, hiddenUnitSize=[128, 256, 128], dataKey='Question', labelKey='y'):
        if corpus is None:
            corpus = self.prepareDF(excelLocation=TRAIN_EXCEL)

        self.dnnObject = DNN(pd_df_train=corpus,
                        pd_df_test=None,
                        learning_rate=learningRate,
                        hidden_units_size=hiddenUnitSize,
                        dataKey=dataKey,
                        labelKey=labelKey)
        result = self.dnnObject.run()
        return result
Example #8
    def create_columns(self, columns=5):
        for w in self.Ws:
            x_train = self.train_datasets[w]
            self.dnns[w] = []
            for i in range(columns):
                model = DNN(width=x_train.shape[1],
                            height=x_train.shape[2],
                            depth=x_train.shape[3],
                            classes=10)
                model.compile(loss="categorical_crossentropy",
                              optimizer=Adadelta(),
                              metrics=["accuracy"])
                self.dnns[w].append(model)
Example #9
    def __init__(self,
                 numpy_rng,
                 theano_rng=None,
                 cfg_si=None,
                 cfg_adapt=None):

        # allocate symbolic variables for the data
        self.x = T.matrix('x')
        self.y = T.ivector('y')

        # we assume that i-vectors are appended to speech features in a frame-wise manner
        self.feat_dim = cfg_si.n_ins
        self.ivec_dim = cfg_adapt.n_ins
        self.iv = self.x[:, self.feat_dim:self.feat_dim + self.ivec_dim]
        self.feat = self.x[:, 0:self.feat_dim]

        # the parameters
        self.params = []  # the params to be updated in the current training
        self.delta_params = []

        # the i-vector network
        dnn_adapt = DNN(numpy_rng=numpy_rng,
                        theano_rng=theano_rng,
                        cfg=cfg_adapt,
                        input=self.iv)
        self.dnn_adapt = dnn_adapt

        # the final output layer which has the same dimension as the input features
        linear_func = lambda x: x
        adapt_output_layer = HiddenLayer(
            rng=numpy_rng,
            input=dnn_adapt.layers[-1].output,
            n_in=cfg_adapt.hidden_layers_sizes[-1],
            n_out=self.feat_dim,
            activation=linear_func)
        dnn_adapt.layers.append(adapt_output_layer)
        dnn_adapt.params.extend(adapt_output_layer.params)
        dnn_adapt.delta_params.extend(adapt_output_layer.delta_params)

        dnn_si = DNN(numpy_rng=numpy_rng,
                     theano_rng=theano_rng,
                     cfg=cfg_si,
                     input=self.feat + dnn_adapt.layers[-1].output)
        self.dnn_si = dnn_si

        # construct a function that implements one step of finetuning;
        # the cost for the second phase of training is defined as the
        # negative log likelihood
        self.finetune_cost = dnn_si.logLayer.negative_log_likelihood(self.y)
        self.errors = dnn_si.logLayer.errors(self.y)
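The adaptation path ends in a linear layer of width feat_dim so that its output can be added element-wise to the speech features. A NumPy sketch of that shape constraint, with hypothetical dimensions and a single linear map standing in for the adaptation DNN:

import numpy as np

feat_dim, ivec_dim = 13, 100
x = np.random.randn(4, feat_dim + ivec_dim)     # features with i-vectors appended
feat, iv = x[:, :feat_dim], x[:, feat_dim:]
W = np.random.randn(ivec_dim, feat_dim) * 0.01  # stand-in for dnn_adapt plus its linear layer
adapted_input = feat + iv @ W                   # same shape as feat, as required
assert adapted_input.shape == feat.shape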
Example #10
    def runDNNTrain(self,
                    learningRate=0.01,
                    hiddenUnitSize=[100, 100],
                    dataKey='Question',
                    labelKey='y'):
        corpusTrain = self.prepareDF(excelLocation=TRAIN_EXCEL)

        dnnObject = DNN(pd_df_train=corpusTrain,
                        pd_df_test=None,
                        learning_rate=learningRate,
                        hidden_units_size=hiddenUnitSize,
                        dataKey=dataKey,
                        labelKey=labelKey)
        result = dnnObject.run()
        return result
Example #11
def evaluate_lstm():
    cws = DNN('lstm', is_seg=True)
    model = 'tmp/lstm-model100.ckpt'
    print(cws.seg('小明来自南京师范大学', model, debug=True))
    print(cws.seg('小明是上海理工大学的学生', model))
    print(cws.seg('迈向充满希望的新世纪', model))
    print(cws.seg('我爱北京天安门', model))
    print(cws.seg('多饮多尿多食', model))
    print(cws.seg('无明显小便泡沫增多,伴有夜尿3次。无明显双脚疼痛,无间歇性后跛行,无明显足部红肿破溃', model))
Example #12
def main2():
    dnn = DNN(input=28 * 28,
              layers=[DropoutLayer(160, LQ),
                      Layer(10, LCE)],
              eta=0.05,
              lmbda=1)  # 98%
    dnn.initialize_rand()
    train, test, validation = load_mnist_simple()

    f_names = [f'mnist_expaned_k0{i}.pkl.gz' for i in range(50)]
    shuffle(f_names)
    for f_name in f_names:
        print(f_name)
        with timing("load"):
            raw_data = load_data(f_name)
        with timing("shuffle"):
            shuffle(raw_data)
        with timing("reshape"):
            data = [(x.reshape((784, 1)), y)
                    for x, y in islice(raw_data, 100000)]
            del raw_data
        with timing("learn"):
            dnn.learn(data)
        del data
        print('TEST:', dnn.test(test))
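The timing context manager used above is not defined in the snippet; a minimal sketch consistent with its usage (an assumption):

from contextlib import contextmanager
import time

@contextmanager
def timing(label):
    start = time.time()
    yield
    print(f'{label}: {time.time() - start:.2f}s')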
Example #13
File: cnn.py Project: GRSEB9S/pdnn
    def __init__(self, numpy_rng, theano_rng=None, cfg=None, testing=False):

        self.layers = []
        self.params = []
        self.delta_params = []

        self.conv_layers = []

        self.cfg = cfg
        self.conv_layer_configs = cfg.conv_layer_configs
        self.conv_activation = cfg.conv_activation
        self.use_fast = cfg.use_fast

        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2**30))
        # allocate symbolic variables for the data
        self.x = T.matrix('x')
        self.y = T.ivector('y')

        self.conv_layer_num = len(self.conv_layer_configs)
        for i in xrange(self.conv_layer_num):
            if i == 0:
                input = self.x
            else:
                input = self.layers[-1].output
            config = self.conv_layer_configs[i]
            conv_layer = ConvLayer(numpy_rng=numpy_rng,
                                   input=input,
                                   input_shape=config['input_shape'],
                                   filter_shape=config['filter_shape'],
                                   poolsize=config['poolsize'],
                                   activation=self.conv_activation,
                                   flatten=config['flatten'],
                                   use_fast=self.use_fast,
                                   testing=testing)
            self.layers.append(conv_layer)
            self.conv_layers.append(conv_layer)
            self.params.extend(conv_layer.params)
            self.delta_params.extend(conv_layer.delta_params)

        self.conv_output_dim = (config['output_shape'][1] *
                                config['output_shape'][2] *
                                config['output_shape'][3])
        cfg.n_ins = self.conv_output_dim

        self.fc_dnn = DNN(numpy_rng=numpy_rng,
                          theano_rng=theano_rng,
                          cfg=self.cfg,
                          input=self.layers[-1].output)

        self.layers.extend(self.fc_dnn.layers)
        self.params.extend(self.fc_dnn.params)
        self.delta_params.extend(self.fc_dnn.delta_params)

        self.finetune_cost = self.fc_dnn.logLayer.negative_log_likelihood(
            self.y)
        self.errors = self.fc_dnn.logLayer.errors(self.y)
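A worked example of the flattened convolution output size computed above, with a hypothetical output_shape of (batch, channels, height, width):

output_shape = [None, 32, 4, 4]  # assumed final conv output shape
n_ins = output_shape[1] * output_shape[2] * output_shape[3]
assert n_ins == 512              # becomes the input size of the fully-connected DNN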
Example #14
    def __init__(self, name='Goodone', net=None, train=0):
        """
        Initialization.
        net: the neural network to be trained
        train: use phase vs. verification phase
        (in the verification phase the network is not yet trained;
         in the use phase the network is already trained)
        """
        self.env = gym.make("MountainCarContinuous-v0")
        self.name = name
        self.simulation_step = 0.1
        self.units = 50
        self.ratio = 200
        self.reset()
        if net:
            self.net = net
        else:
            self.net = DNN(1, 1, self.units, train=train, name=self.name)
Example #15
def getDNN(loader, args):
    sys.path.insert(1, os.getcwd())
    #try:
    from dnn import DNN
    #     if args.rank == 0:
    #        print('Successfully imported your model.')
    # except:
    #    if args.rank == 0:
    #        print('Could not import your model. Exiting.')
    #    exit(-1)
    return DNN(loader.getXShape())
Example #16
  def __init__(self, nnet_conf, input_dim, output_dim, feature_conf, num_gpus = 1, 
               use_gpu = True, gpu_ids = '-1', summary_dir = None):
    ''' just some basic config for this trainer '''
    self.arch = nnet_conf['nnet_arch']

    #tensorflow related
    self.graph = None
    self.sess = None

    #feature related
    self.batch_size = feature_conf['batch_size']
    self.max_length = feature_conf.get('max_length', 0)
    self.jitter_window = feature_conf.get('jitter_window', 0)

    #nnet training & decoding
    self.buckets_tr = nnet_conf.get('buckets_tr', None)
    self.buckets = nnet_conf.get('buckets', None)

    # for learning rate schedule. None in default (means scheduler outside)
    # otherwise use prep_learning_rate
    self.global_step = None
    self.learning_rate = None

    #gpu related
    self.wait_gpu = True
    self.num_gpus = num_gpus
    self.use_gpu = use_gpu
    self.gpu_ids = gpu_ids

    #summary directory
    self.summary_dir = summary_dir

    if self.arch == 'dnn':
      self.model = DNN(input_dim, output_dim, self.batch_size, num_gpus)
    elif self.arch == 'bn':
      self.model = BN(input_dim, output_dim, self.batch_size, num_gpus)
    elif self.arch == 'lstm':
      self.model = LSTM(input_dim, output_dim, self.batch_size, self.max_length, num_gpus)
    elif self.arch == 'seq2class':
      self.model = SEQ2CLASS(input_dim, output_dim, self.batch_size, self.max_length, num_gpus,
                             buckets_tr = self.buckets_tr, buckets = self.buckets)
    elif self.arch == 'jointdnn':
      self.model = JOINTDNN(input_dim, output_dim, self.batch_size, self.max_length, num_gpus,
                            buckets_tr = self.buckets_tr, buckets = self.buckets,
                            mode = nnet_conf.get('mode', 'joint'))
    elif self.arch == 'jointdnn-sid':
      self.model = JOINTDNN(input_dim, output_dim, self.batch_size, self.max_length, num_gpus,
                            buckets_tr = self.buckets_tr, buckets = self.buckets, mode = 'sid')
    elif self.arch == 'jointdnn-asr':
      self.model = JOINTDNN(input_dim, output_dim, self.batch_size, self.max_length, num_gpus,
                            buckets_tr = self.buckets_tr, buckets = self.buckets, mode = 'asr')
    else:
      raise RuntimeError("arch type %s not supported", self.arch)
Example #17
def main():
    train, test, validation = load_mnist_simple()
    # x, y = train[0]
    # print("x: ", x.shape)
    # print("y: ", y)

    with timing(f""):
        # dnn = DNN(input=28 * 28, layers=[Layer(30, LQ), Layer(10, LCE)], eta=0.05)  # 96%
        # dnn = DNN(input=28 * 28, layers=[Layer(30, LQ), Layer(10, SM)], eta=0.001)  # 68%
        # dnn = DNN(input=28 * 28, layers=[Layer(100, LQ), Layer(10, LCE)], eta=0.05, lmbda=5)  # 98%
        # dnn = DNN(input=28 * 28, layers=[DropoutLayer(100, LQ), Layer(10, LCE)], eta=0.05)  # 97.5%
        dnn = DNN(input=28 * 28, layers=[DropoutLayer(160, LQ), Layer(10, LCE)], eta=0.05, lmbda=3)
        dnn.initialize_rand()
        dnn.learn(train, epochs=30, test=validation, batch_size=29)

    print('test:', dnn.test(test))
    print(dnn.stats())
Example #18
def run_experiment(hparams):
    '''
    Google ML Engine entry point for training job.
    '''

    model = DNN(784, 10)

    init = tf.global_variables_initializer()

    with tf.Session() as session:
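        # warm-up evaluation whose result is discarded; presumably this forces
        # graph construction before training (the source does not explain it)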
        tf.nn.in_top_k(tf.constant([[2.0, 9.0], [7.0, 5.0], [0.0, 0.0]]),
                       tf.constant([1, 0, 1]), 1).eval()

        session.run(init)

        for epoch in range(0, hparams.epochs):

            for X, Y in read_file_in_batches(hparams.train_file):
                session.run(
                    model.training,
                    feed_dict={
                        model.x: X,
                        model.y: Y
                    },
                )

            X, Y = get_eval_data(hparams.eval_file)

            print("Epoch(%s) Error: %s " %
                  (epoch,
                   session.run(model.error, feed_dict={
                       model.x: X,
                       model.y: Y
                   })))

        X, Y = get_eval_data(hparams.eval_file)

        for i in range(10):
            print("==================================")
            plt.imshow(np.array(X[i]).reshape([28, 28]))
            plt.show()
            print("::: %s" % Y[i])
            result = session.run(model.logits, feed_dict={model.x: [
                X[i],
            ]})
            print("::: %s" % result)
            print("::: Digit: %s" %
                  session.run(tf.argmax(result.reshape(10), 0)))
Example #19
def evaluate_mlp():
    cws = DNN('mlp', mode=TrainMode.Sentence)
    model = 'tmp/mlp-model20.ckpt'
    # print(cws.seg('小明来自南京师范大学', model, debug=True))
    # print(cws.seg('小明是上海理工大学的学生', model))
    # print(cws.seg('迈向充满希望的新世纪', model))
    # print(cws.seg('我爱北京天安门', model))
    # print(cws.seg('在中国致公党第十一次全国代表大会隆重召开之际,中国共产党中央委员会谨向大会表示热烈的祝贺,向致公党的同志们',model))
    print(cws.seg('多饮多尿多食', model))
    print(cws.seg('无明显小便泡沫增多,伴有夜尿3次。', model))
    print(cws.seg('无明显小便泡沫增多,伴有夜尿3次。', model, ner=True))
    print(cws.seg('无明显双脚疼痛,无间歇性后跛行,无明显足部红肿破溃', model))
Example #20
def plot_accuracy():
    import numpy as np
    from matplotlib import pyplot as plt
    train, test, validation = load_mnist_simple()
    dnn = DNN(input=28 * 28, layers=[Layer(100, LQ), Layer(10, LCE)], eta=0.05, lmbda=1)
    for l in dnn.layers:
        l.w = np.random.random(l.w.shape) - 0.5
    acc1 = list(dnn.learn_iter(train, epochs=20, test=validation))
    dnn.initialize_rand()
    acc2 = list(dnn.learn_iter(train, epochs=20, test=validation))
    print(acc1)
    print(acc2)
    plt.plot(acc1)
    plt.plot(acc2)
    plt.show()
Example #21
def main():
    options = {
        'learning_rate': 0.1,
        'beta1': 0.9,
        'optimizer': 'gd',
        'loss': 'crossentropy'
    }

    (train_x, test_x, train_set_x_orig, train_set_y_orig,
     test_set_x_orig, test_set_y_orig, classes) = load_data()

    X = np.array([[1, 2], [1, 2], [4, 2]])
    Y = np.array([[0], [0], [0]])

    print(train_x.shape)
    print(test_x.shape)
    print(train_set_y_orig.shape)

    print(train_set_y_orig[0, 0:10])

    layers = [
        Dense(32, activation='relu'),
        Dense(5, activation='relu'),
        Dense(1, activation='sigmoid')
    ]

    print(len(layers))

    dnn = DNN(train_x, train_set_y_orig, layers, options)

    print(dnn.params.keys())

    #for param in sorted(dnn.params):
    #	print(param, dnn.params[param].shape)

    print(dnn)
    print(dnn.loss(dnn.predict(test_x), test_set_y_orig))

    dnn.train()
Example #22
def avg(array):
    return float(sum(array)) / len(array)

boston = datasets.load_boston()
#boston = datasets.make_regression()
#data = boston[0]
#target = boston[1]
data = boston.data
target = boston.target
matrix = Preprocess.to_matrix(list(data))
matrix = Preprocess.scale(matrix)
matrix = list(matrix)
target = list(target)
layers = [13,7,1]

dnn = DNN(matrix, target, layers, hidden_layer="TanhLayer", final_layer="LinearLayer", compression_epochs=5, smoothing_epochs=0, bias=True)
full = dnn.fit()
print full
#preds = [dnn.predict(d)[0] for d in matrix]
preds = [full.activate(d)[0] for d in matrix]

print "mrse preds {0}".format(mrse(preds, target))
print "rmse preds {0}".format(rmse(preds, target))

#mean = avg(target)
#mean = [mean for i in range(len(target))]
#print "mrse mean {0}".format(mrse(mean, target))
#print "rmse mean {0}".format(rmse(mean, target))

#for i in range(10):
#    d = matrix[i]
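mrse and rmse are called above but not defined in the snippet. Plausible definitions consistent with their usage; reading mrse as "mean relative squared error" is an assumption:

import math

def rmse(preds, targets):
    return math.sqrt(sum((p - t) ** 2 for p, t in zip(preds, targets)) / len(targets))

def mrse(preds, targets):
    return sum(((p - t) / t) ** 2 for p, t in zip(preds, targets)) / len(targets)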
Example #23
maxseq_length = 100
embedding_size = 300
batch_size = 32
keep_prob = 1.0

test_data = read_data('data/test.txt')
test_data = np.array(test_data)
test_X = test_data[:,0]
test_Y = test_data[:,[-1]]

word2vec = word2vec_load()

if model_type == 'logistic':
    model = Logistic(maxseq_length, embedding_size)
elif model_type == 'dnn':
    model = DNN(maxseq_length, embedding_size)
elif model_type == 'rnn':
    model = RNN(batch_size, maxseq_length, embedding_size)
elif model_type == 'lstm':
    model = LSTM(batch_size, maxseq_length, embedding_size, keep_prob)
elif model_type == 'cnn':
    model = CNN(batch_size, maxseq_length, embedding_size)

with tf.Session() as sess:
    total_batch = int(len(test_X) / batch_size)
    save_path = './saved/' + model_type + '/model-' + str(test_epoch)

    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver.restore(sess, save_path)
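total_batch is computed above but the evaluation loop is cut off. A minimal batching sketch consistent with it; only the slicing is concrete, since the model's feed/fetch API is not shown:

for i in range(total_batch):
    batch_x = test_X[i * batch_size:(i + 1) * batch_size]
    batch_y = test_Y[i * batch_size:(i + 1) * batch_size]
    # feed batch_x / batch_y to the restored model here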
Example #24
def main():
    print('=========================================')
    print('               Numpy DNN                 ')
    print('              26/Nov/2017                ')
    print('    By Thang Vu ([email protected])    ')
    print('=========================================')

    # load datasets
    path = 'data/mnist.pkl.gz'
    train_set, val_set, test_set = load_mnist_datasets(path)
    batch_size = 128
    X_train, y_train = train_set
    X_val, y_val = val_set
    X_test, y_test = test_set

    # bookkeeping for the best model based on the validation set
    best_val_acc = -1
    best_model = None

    # create model and optimization method
    dnn = DNN()
    sgd = SGD(lr=0.1, lr_decay=0.1, weight_decay=1e-3, momentum=0.9)
    
    # Train 
    for epoch in range(20):
        dnn.train_mode() # set model to train mode (because of dropout)
        
        num_train = X_train.shape[0]
        num_batch = num_train//batch_size
        for batch in range(num_batch):
            # get batch data
            batch_mask = np.random.choice(num_train, batch_size)
            X_batch = X_train[batch_mask]
            y_batch = y_train[batch_mask]
           
            # forward
            output = dnn.forward(X_batch)
            loss, dout = softmax_cross_entropy_loss(output, y_batch)
            if batch%100 == 0:
                print("Epoch %2d Iter %3d Loss %.5f" %(epoch, batch, loss))

            # backward and update
            grads = dnn.backward(dout)
            sgd.step(dnn.params, grads)
                                
        sgd.decay_learning_rate() # decay learning rate after one epoch
        dnn.eval_mode() # set model to eval mode 
        train_acc = check_acc(dnn, X_train, y_train)
        val_acc = check_acc(dnn, X_val, y_val)

        if best_val_acc < val_acc:
            best_val_acc = val_acc
            best_model = dnn

        # keep the best model based on validation accuracy
        print('Epoch finished.')
        print('Train acc %.3f' %train_acc)
        print('Val acc %.3f' %val_acc)
        print('-'*30)
        print('')

    print('Train finished. Best acc %.3f' %best_val_acc)
    test_acc = check_acc(best_model, X_test, y_test)
    print('Test acc %.3f' %test_acc)
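check_acc is used above but not shown; a minimal sketch matching its call sites, assuming dnn.forward returns class scores and y holds integer labels:

import numpy as np

def check_acc(model, X, y):
    scores = model.forward(X)
    return np.mean(np.argmax(scores, axis=1) == y)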
Example #25
import matplotlib.pyplot as plt
from aircraft_obj import AircraftEnv
from dnn import DNN
from verticalguidence import guidance
from scipy.optimize import root
from scipy.interpolate import interp1d
import math

# Lateral vs. longitudinal trajectory: the lateral channel determines direction, the longitudinal channel determines magnitude
if __name__ == '__main__':
    # definition of the longitudinal network
    num = 7000
    net = DNN(2,
              1,
              512,
              train=0,
              isnorm=True,
              name='w_%s' % str(num),
              scale=[0.1, 100, 100])  # define the network
    # define the training object
    cav = AircraftEnv()
    # run the flight
    info = guidance(cav, net, tht_direction='strategy')
    states, ws, hcmds, thts = info['state_records'], info['w_records'], info[
        'hcmd_records'], info['tht_records']
    ts, angles = info['ts'], info['angles']
    # longitudinal guidance plots
    # a single H-V corridor
    fig1, ax1 = plt.subplots()
    cav.plot(states, hcmds, ax=ax1)
    ax1.set_xlabel('V(m/s)')
Example #26
        'target_error': cav.calculate_range(),
        'angles': np.array(angles),
        'ts': t
    }
    return info


if __name__ == '__main__':
    train_mode = 0  # whether to train the network: 0 = don't train, 1 = train from scratch, 2 = continue from a previous run
    num = 7000
    g = tf.Graph()
    memory = np.load('Trajectories/memory_%s.npy' % num)  # load the data
    net = DNN(2,
              1,
              512,
              train=train_mode,
              isnorm=True,
              name='w_%s' % str(num),
              graph=g,
              scale=[0.1, 100, 100])  # define the network
    memory_norm = net.norm(memory)
    if train_mode != 0:
        # training mode
        X = memory_norm[:, 1:].copy()
        Y = memory_norm[:, 0:1].copy()
        losses = []
        for i in range(5000):
            sample_index = np.random.choice(len(X), size=1000)
            batch_x = X[sample_index, :]
            batch_y = Y[sample_index, :]
            loss, mae = net.learn(batch_x, batch_y)
            losses.append(loss)
Example #27
def init_DNN(p, hidden_layers_units, nbr_classes):
    """ construit et d’initialise les poids et les biais d’un DNN"""
    return DNN(p, hidden_layers_units, nbr_classes)
Example #28
class MountainCar:
    """
    Defines the model predicted by the network
    """
    def __init__(self, name='Goodone', net=None, train=0):
        """
        Initialization.
        net: the neural network to be trained
        train: use phase vs. verification phase
        (in the verification phase the network is not yet trained;
         in the use phase the network is already trained)
        """
        self.env = gym.make("MountainCarContinuous-v0")
        self.name = name
        self.simulation_step = 0.1
        self.units = 50
        self.ratio = 200
        self.reset()
        if net:
            self.net = net
        else:
            self.net = DNN(1, 1, self.units, train=train, name=self.name)

    def save_samples(self, big_epis=100):
        """
        Save the data collected from running the environment.
        The collected data has big_epis * 3000 rows.
        """
        record = []
        for big_epi in range(big_epis):
            # initialization
            # so that the goal point can be reached
            a = 0.0025
            change = 100
            observation = self.reset()
            for epi in range(10000):
                if epi % change == 0:
                    u = self.action_sample() * 3
                    print(big_epi, int(20 * epi / 3000) * '=')
                observation_old = observation.copy()
                observation, _, done, _ = self.env.step(u)
                target = self._get_target(observation_old, observation, u)
                x = observation_old[0]
                # save the true value and the computed value for later comparison
                # record.append([x, target, -a * math.cos(3 * x)])
                record.append([x, target])
        data = np.array(record)
        np.save(os.path.join(self.net.model_path0, 'memory.npy'), data)
        return data

    def verity_data(self):
        """
        Verify the dataset: plot the difference between the computed values and the true values
        """
        import matplotlib.pyplot as plt
        import pandas as pd
        import seaborn as sns
        sns.set()

        self.data = self._load_data()
        data_size = len(self.data)
        indexs = np.random.choice(data_size, size=int(data_size / 10))
        df = pd.DataFrame(self.data[indexs, :],
                          columns=['position', 'target_dot', 'real_dot'])
        plt.figure()
        plt.scatter(df['position'],
                    df['target_dot'] * 1.1,
                    s=5,
                    label='target')  # multiplied by 1.1 to make the difference visible
        plt.scatter(df['position'], df['real_dot'], s=5, label='real')
        plt.legend()
        plt.show()

    def train_model(self):
        """
        Train the model on the collected data: first scale the data,
        then fit it with the neural network.
        """
        # training
        data = self._load_data()
        data[:, 1:] = data[:, 1:] * self.ratio
        self.net.learn_data(data)
        self.net.store_net()

    def verity_net_1(self):
        """
        Verify that the neural network is correct
        """

        a = 0.0025
        x_ = np.arange(-1.1, 0.5, 0.001)
        y_tru = -a * np.cos(3 * x_)
        y_pre = self.net.predict(x_.reshape((-1, 1))) / self.ratio
        # check the fit over all x
        fig = plt.figure()
        plt.plot(x_, y_tru, label='x_tru')
        plt.plot(x_, y_pre, label='x_pre')
        plt.legend()

        y_tru_dot = 3 * a * np.sin(3 * x_)
        y_pre_dot = self.net.predict_dot(x_.reshape(
            (-1, 1)))[:, 0] / self.ratio
        # y_pre_dot = self.net.predict_dot(x_.reshape((-1, 1)))[:, 0]
        # check the fit over all x_dot
        fig = plt.figure()
        plt.plot(x_, y_tru_dot, label='x_dot_tru')
        plt.plot(x_, y_pre_dot, label='x_dot_pre')
        plt.legend()

        plt.show()

    def verity_net_2(self):
        """
        Verify the neural network, part 2:
        comparison against the real system
        """
        observation_record = []
        observation_record_net = []
        time_record = []
        observation = self.reset()
        observation_net = observation

        change = 100
        time = 0
        epi = 0
        while True:
            observation_record.append(observation)
            observation_record_net.append(observation_net)
            time_record.append(time)
            if epi % change == 0:
                action = self.action_sample() * 3
            epi += 1
            observation, _, done, info = self.env.step(action)
            observation_net, _, done_net, info_net = self.step(action)
            time += self.simulation_step
            print(observation, observation_net)
            if done_net:
                break

        observation_record = np.array(observation_record)
        observation_record_net = np.array(observation_record_net)
        time_record = np.array(time_record)

        plt.figure(1)
        plt.plot(time_record, observation_record[:, 0], label='x_true')
        plt.plot(time_record, observation_record_net[:, 0], label='x_pre')
        plt.xlabel('Time(s)')
        plt.ylabel('Xposition')
        plt.plot(time_record, 0.45 * np.ones(len(observation_record)), 'r')
        plt.legend()

        plt.figure(2)
        plt.plot(time_record, observation_record[:, 1], label='v_true')
        plt.plot(time_record, observation_record_net[:, 1], label='v_pre')
        plt.xlabel('Time(s)')
        plt.ylabel('Vspeed')
        plt.legend()
        plt.show()

    def _load_data(self):
        """
        Read back the data collected at the start.
        :return: the saved data array
        """
        data = np.load(os.path.join(self.net.model_path0, 'memory.npy'))
        return data

    def action_sample(self):
        """
        Randomly sample an action valid for the environment
        """
        return self.env.action_space.sample()

    def reset(self):
        """
        Reset using the original environment's random initialization
        """
        self.state = self.env.reset()
        return self.state

    def step(self, action):
        """
        Model identification using the neural network
        """
        action = min(max(action, -1.0), 1.0)
        x, v = self.state
        # derivative obtained from the neural network
        dot = self.get_dot(self.state)
        v_dot = 0.0015 * action + dot[0]
        v = v + v_dot * self.simulation_step
        v = min(max(v, -0.07), 0.07)

        # compute x from v
        x = x + self.simulation_step * v
        x = min(max(x, -1.2), 0.6)
        X = np.array([x, v])
        if X.ndim == 2:
            X = X.reshape((2, ))
        self.state = X
        # return values
        info = {}
        reward = 0.0
        done = x >= 0.45
        return self.state, reward, done, info

    def step_true(self, action):
        """
        Model identification using the original (true) dynamics
        """
        action = min(max(action, -1.0), 1.0)
        x, v = self.state
        # derivative from the neural network (disabled; the true dynamics are used below)
        # dot = self.get_dot(self.state)
        v_dot = 0.0015 * action - 0.0025 * math.cos(3 * x)
        v = v + v_dot * self.simulation_step
        v = min(max(v, -0.07), 0.07)

        # compute x from v
        x = x + self.simulation_step * v
        x = min(max(x, -1.2), 0.6)
        X = np.array([x, v])
        if X.ndim == 2:
            X = X.reshape((2, ))
        self.state = X
        # return values
        info = {}
        reward = 0.0
        done = x >= 0.45
        return self.state, reward, done, info

    def get_dot(self, X):
        return self.net.predict(X[0:1])[0] / self.ratio

    def get_dot2(self, X):
        return self.net.predict_dot(X[0:1])[0] / self.ratio

    def _get_target(self, X, X_new, u):
        """
        Compute the ground-truth value the neural network should learn:
        first the true derivative, then the target value
        """
        u = min(max(u, -1.0), 1.0)
        return (((X_new - X) / self.simulation_step)[1] - u * 0.0015)
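A quick consistency check between _get_target and step (the values are hypothetical): the target isolates exactly the gravity term -0.0025*cos(3x) that the network learns, which step() then recombines with the control term 0.0015*u:

import math

u, x_old = 0.5, -0.5
v_dot_true = 0.0015 * u - 0.0025 * math.cos(3 * x_old)  # true MountainCar dynamics
target = v_dot_true - 0.0015 * u                        # what _get_target isolates
assert abs(target - (-0.0025 * math.cos(3 * x_old))) < 1e-12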
Example #29
elif sys.argv[1] == 'lr':
    from lr import LR
    model = LR(user_count=user_count,
               item_count=item_count,
               cate_count=cate_count,
               cate_list=cate_list)
elif sys.argv[1] == 'lrcross':
    from lrcross import LR
    model = LR(user_count=user_count,
               item_count=item_count,
               cate_count=cate_count,
               cate_list=cate_list)
elif sys.argv[1] == 'dnn':
    from dnn import DNN
    model = DNN(user_count=user_count,
                item_count=item_count,
                cate_count=cate_count,
                cate_list=cate_list)
elif sys.argv[1] == 'widedeep':
    from widedeep import WideDeep
    model = WideDeep(user_count=user_count,
                     item_count=item_count,
                     cate_count=cate_count,
                     cate_list=cate_list)
else:
    print('usage: train.py lr|lrcross|dnn|widedeep')
    sys.exit(1)

with tf.Session() as sess:
    ## TODO: initialize local and global variables
    raise NotImplementedError()
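A plausible completion of the TODO above, assuming the standard TF1 initializers are what is meant:

sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())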
Example #30
class TrainTieBot:

    def __init__(self):
        self.bagOfWords = None
        self.wordFeatures = None

    def tokenize(self, corpus):
        if not isinstance(corpus, dict):
            raise TrainTieBotException('Corpus must be of type: dict')
        # tokenize each sentence
        tokenizer = RegexpTokenizer(r'\w+')
        token = [str(x) for x in tokenizer.tokenize(corpus.values()[0].lower())]
        return token

    def cleanUpQuery(self, sentence):
        tokenizer = RegexpTokenizer(r'\w+')
        sentence = sentence.lower()
        return tokenizer.tokenize(sentence)

    def getFeaturesByName(self, corpus):
        words = list()
        if not isinstance(corpus, list):
            raise TrainTieBotException('Corpus must be of type: list')
        for x in corpus:
            words.extend(x.values()[0])
        self.wordFeatures = list(set(words))
        return self.wordFeatures

    def BOW(self, corpus):
        if not isinstance(corpus, list):
            raise TrainTieBotException('Corpus must be of type: list')
        tokenDict = [{x.keys()[0]: self.tokenize(x)} for x in corpus]
        getWordFeatures = self.getFeaturesByName(tokenDict)
        vectorDataFrame = pd.DataFrame(data=np.zeros([len(corpus), len(getWordFeatures)]).astype(int))
        # add a label column
        labels = [x.keys()[0] for x in corpus]
        for row in vectorDataFrame.index.tolist():
            vectorDataFrame.loc[row, :] = \
                [corpus[row].values()[0].count(item) if item in corpus[row].values()[0] else 0 for item in getWordFeatures]
        vectorDataFrame['y'] = labels
        self.bagOfWords = vectorDataFrame
        return self.bagOfWords

    def BOWFit(self, query):
        if self.bagOfWords is None:
            raise TrainTieBotException('Create Bag of Words vectors before fitting it to a new query.')
        tokenDict = [{x.keys()[0]: self.tokenize(x)} for x in query]
        arrayFitDf = pd.DataFrame(data=np.zeros([len(tokenDict), len(self.wordFeatures)])).astype(int)
        arrayFitDf['y'] = [x.keys()[0] for x in query]
        for i in range(len(query)):
            arrayFitDf.iloc[i, :-1] = [query[i].values()[0].count(item) if item in query[i].values()[0] else 0 for item in self.wordFeatures]
        return arrayFitDf

    def list2df(self, data, dataKey, labelKey):
        df = {}
        df[dataKey] = []
        df[labelKey] = []
        for obj in data:
            df[dataKey].append(obj.values()[0])
            df[labelKey].append(obj.keys()[0])
        return pd.DataFrame.from_dict(df)

    def df2list(self, df, dataKey, labelKey):
        df = df[[dataKey, labelKey]]
        return [{df[labelKey][x]:df[dataKey][x]} for x in range(df.shape[0])]

    def prepareDF(self, corpus=None, dataKey='Question', labelKey='y', excelLocation=None):
        if corpus is None:
            corpusObj = Corpus()
            corpusT = corpusObj.loadData(excelLocation)
            corpusT = self.df2list(corpusT, dataKey, labelKey)
            # Expand Vocabulary list with part of speeches
            corpusT = corpusObj.getExpandedSentences(corpusT)
            # #print "corpus test:"
            corpusT = self.list2df(corpusT, dataKey, labelKey)
        else:
            corpusObj = Corpus()
            corpusT = corpusObj.getExpandedSentences(corpus)
            corpusT = self.list2df(corpusT, dataKey, labelKey)
        return corpusT


    def getX(self, corpus=None):
        dataKey = 'Question'
        labelKey = 'y'
        corpusObj = Corpus()
        if corpus is None:
            corpusTrain = corpusObj.loadData(TRAIN_EXCEL)
            corpusTrain = self.df2list(corpusTrain, dataKey, labelKey)
            corpusTrain = corpusObj.getExpandedSentences(corpusTrain)
            BOWTrain = self.BOW(corpusTrain)

            X = BOWTrain.iloc[:, :-1]
            y = BOWTrain.iloc[:, -1]
        else:
            corpusTrain = corpusObj.getExpandedSentences(corpus)
            BOWTrain = self.BOW(corpusTrain)
            X = BOWTrain.iloc[:, :-1]
            y = BOWTrain.iloc[:, -1]
        return X, y

    def gety(self, query):
        dataKey = 'Question'
        labelKey = 'y'
        corpusTestObj = Corpus()
        corpusTest = corpusTestObj.getExpandedSentences(query)
        BOWTest = self.BOWFit(corpusTest)
        X_test = BOWTest.iloc[:, :-1]
        y_test = BOWTest[labelKey]
        return X_test, y_test

    def runDNNTrain(self, corpus=None, learningRate=0.02, hiddenUnitSize=[128, 256, 128], dataKey='Question', labelKey='y'):
        if corpus is None:
            corpus = self.prepareDF(excelLocation=TRAIN_EXCEL)

        self.dnnObject = DNN(pd_df_train=corpus,
                        pd_df_test=None,
                        learning_rate=learningRate,
                        hidden_units_size=hiddenUnitSize,
                        dataKey=dataKey,
                        labelKey=labelKey)
        result = self.dnnObject.run()
        return result


    def runSVM(self, corpus=None):
        X, y = self.getX(corpus)
        svm = SVMClassifier(X, y)
        clf = svm.train()
        #y_pred = clf.predict(X)
        #y_test_pred = clf.predict(X_test)
        #result1 = float(sum((y == y_pred) + 0)) / y_pred.shape[0]
        #result2 = float(sum((y_test == y_test_pred) + 0)) / y_test_pred.shape[0]
        # print 'SVM Training set accuracy: ', result1
        # print 'SVM Test set accuracy: ', result2
        return clf

    def runLR(self, corpus=None, query=None):
        X, y = self.getX(corpus)
        lr = LogisticRegClassifier(X, y)
        clf = lr.train()
        #y_pred = clf.predict(X)
        #y_test_pred = clf.predict(X_test)
        #result1 = float(sum((y == y_pred) + 0))/y_pred.shape[0]
        #result2 = float(sum((y_test == y_test_pred) + 0))/y_test_pred.shape[0]
        # print 'LR Training set accuracy: ', result1
        # print 'LR Test set accuracy: ', result2
        return clf

    def getAnswer(self, answer_index):
        answer = Corpus().loadData(TRAIN_EXCEL)
        return answer['Answer'][answer_index]
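Hypothetical end-to-end usage of the class above (Python 2, matching the dict-based corpus format inferred from tokenize and BOW):

bot = TrainTieBot()
corpus = [{'greet': 'hello there friend'}, {'bye': 'good bye friend'}]
bow = bot.BOW(corpus)  # one row per document, word-count columns plus a 'y' label column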