Example #1
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    '''
	# in-memory training
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size'],
		save_dir = configs['model']['save_dir']
	)
	'''
    # out-of-memory generative training
    steps_per_epoch = math.ceil(
        (data.len_train - configs['data']['sequence_length']) /
        configs['training']['batch_size'])
    model.train_generator(data_gen=data.generate_train_batch(
        seq_len=configs['data']['sequence_length'],
        batch_size=configs['training']['batch_size'],
        normalise=configs['data']['normalise']),
                          epochs=configs['training']['epochs'],
                          batch_size=configs['training']['batch_size'],
                          steps_per_epoch=steps_per_epoch,
                          save_dir=configs['model']['save_dir'])

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    predictions = model.predict_sequences_multiple(
        x_test, configs['data']['sequence_length'],
        configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    # predictions = model.predict_point_by_point(x_test)

    plot_results_multiple(predictions, y_test,
                          configs['data']['sequence_length'])
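Every LSTM example on this page reads its settings from a JSON config file. A minimal sketch of the structure the configs[...] lookups above assume (only the key names are taken from the code; the file name and values are illustrative placeholders):

import json

# Hypothetical config.json contents; key names follow the lookups
# in the examples, values are placeholders only.
example_config = {
    "data": {
        "filename": "sp500.csv",
        "train_test_split": 0.85,
        "columns": ["Close", "Volume"],
        "sequence_length": 50,
        "normalise": True
    },
    "training": {
        "epochs": 2,
        "batch_size": 32
    },
    "model": {
        "save_dir": "saved_models"
    }
}

with open('config.json', 'w') as f:
    json.dump(example_config, f, indent=4)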
Example #2
File: run.py Project: t-k-/nbnp
def main(args):
    if args.seed >= 0:
        random_seed(args.seed)

    # create output directory for saving result images
    if not os.path.exists('./output'): os.mkdir('./output')

    # define network we are going to load
    net = Net([
        Conv2D(kernel=[5, 5, 1, 6], stride=[1, 1], padding="SAME"),
        ReLU(),
        MaxPool2D(pool_size=[2, 2], stride=[2, 2]),
        Conv2D(kernel=[5, 5, 6, 16], stride=[1, 1], padding="SAME"),
        ReLU(),
        MaxPool2D(pool_size=[2, 2], stride=[2, 2]),
        Flatten(),
        Dense(120),
        ReLU(),
        Dense(84),
        ReLU(),
        Dense(10)
    ])

    # load the model
    model = Model(net=net, loss=SoftmaxCrossEntropyLoss(), optimizer=Adam())
    print('loading pre-trained model file', args.model_path)
    model.load(args.model_path)

    # create pyplot window for on-the-fly visualization
    img = np.ones((1, 28, 28, 1))
    fig = disp_mnist_batch(img)

    # generate the actual visualizations

    layer_name = 'conv-layer-1'
    print('[ ' + layer_name + ' ]')
    images = am_visualize_conv_layer(model, 0, fig)
    save_batch_as_images('output/{}.png'.format(layer_name),
                         images,
                         title='visualized feature maps for ' + layer_name)

    layer_name = 'conv-layer-2'
    print('[ ' + layer_name + ' ]')
    images = am_visualize_conv_layer(model, 3, fig)
    save_batch_as_images('output/{}.png'.format(layer_name),
                         images,
                         title='visualized feature maps for ' + layer_name)
Example #3
def main():
    configs = json.load(open(CONFIG, 'r'))

    data = DataLoader(DATA, configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    '''
	# in-memory training
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size']
	)
	'''
    # out-of-memory generative training
    steps_per_epoch = math.ceil(
        (data.len_train - configs['data']['sequence_length']) /
        configs['training']['batch_size'])
    model.train_generator(data_gen=data.generate_train_batch(
        seq_len=configs['data']['sequence_length'],
        batch_size=configs['training']['batch_size'],
        normalise=configs['data']['normalise']),
                          epochs=configs['training']['epochs'],
                          batch_size=configs['training']['batch_size'],
                          steps_per_epoch=steps_per_epoch,
                          model_path=MODEL)

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    predictions = model.predict_sequences_multiple(
        x_test, configs['data']['sequence_length'],
        configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    # predictions = model.predict_point_by_point(x_test)

    plot_results_multiple(predictions, y_test,
                          configs['data']['sequence_length'])
    # plot_results(predictions, y_test)
    sys.stdout.write("--END--")
Example #4
def main(choice):
    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])
    model = Model()
    model.build_model(configs)
    if choice != 'info':
        x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                                   normalise=configs['data']['normalise'])

        # in-memory training
        model.train(x,
                    y,
                    epochs=configs['training']['epochs'],
                    batch_size=configs['training']['batch_size'])

        # out-of-memory generative training
        # steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
        # model.train_generator(
        #     data_gen = data.generate_train_batch(
        #         seq_len = configs['data']['sequence_length'],
        #         batch_size = configs['training']['batch_size'],
        #         normalise = configs['data']['normalise']
        #     ),
        #     epochs = configs['training']['epochs'],
        #     batch_size = configs['training']['batch_size'],
        #     steps_per_epoch = steps_per_epoch
        # )

        x_test, y_test = data.get_test_data(
            seq_len=configs['data']['sequence_length'],
            normalise=configs['data']['normalise'])

        if choice == "multi":
            predictions = model.predict_sequences_multiple(
                x_test, configs['data']['sequence_length'],
                configs['data']['sequence_length'])
            plot_results_multiple(predictions, y_test,
                                  configs['data']['sequence_length'])
        elif choice == "seq":
            predictions = model.predict_sequence_full(
                x_test, configs['data']['sequence_length'])
            plot_results(predictions, y_test)
        else:
            predictions = model.predict_point_by_point(x_test)
            plot_results(predictions, y_test)
Example #5
    def __init__(self, parent=None):
        super(MainForm, self).__init__(parent)

        # Main window instance.
        self.ui = Ui_MainWindow()
        # Initialize the main window user interface.
        self.ui.setupUi(self)

        # Model instance.
        self.mongodb_obj = Model()

        # Initialize in advance.
        for i in range(1, 5):
            self._init_all(i)

        # Video player instance.
        self.video_window = VideoWindow()
Example #6
def main(args):
    if args.seed >= 0:
        random_seed(args.seed)

    # prepare the data
    data_path = os.path.join(args.data_dir, args.file_name)
    train_x, train_y, img_shape = prepare_dataset(data_path)

    net = Net([
        Dense(30),
        ReLU(),
        Dense(60),
        ReLU(),
        Dense(60),
        ReLU(),
        Dense(30),
        ReLU(),
        Dense(3),
        Sigmoid()
    ])

    model = Model(net=net, loss=MSELoss(), optimizer=Adam())
    mse_evaluator = MSEEvaluator()
    iterator = BatchIterator(batch_size=args.batch_size)
    for epoch in range(args.num_ep):
        t_start = time.time()
        for batch in iterator(train_x, train_y):
            preds = model.forward(batch.inputs)
            loss, grads = model.backward(preds, batch.targets)
            model.apply_grad(grads)

        # evaluate
        preds = net.forward(train_x)
        mse = mse_evaluator.evaluate(preds, train_y)
        print(mse)

        if args.paint:
            # generate painting
            preds = preds.reshape(img_shape[0], img_shape[1], -1)
            preds = (preds * 255.0).astype("uint8")
            filename, ext = os.path.splitext(args.file_name)
            output_filename = "output" + ext
            output_path = os.path.join(args.data_dir, output_filename)
            Image.fromarray(preds).save(output_path)
        print("Epoch %d time cost: %.2f" % (epoch, time.time() - t_start))
Example #7
def main(args):
    if args.seed >= 0:
        random_seed(args.seed)

    # prepare the data
    train_x, train_y, img_shape = prepare_dataset(args.img)

    net = Net([
        Dense(30),
        ReLU(),
        Dense(100),
        ReLU(),
        Dense(100),
        ReLU(),
        Dense(30),
        ReLU(),
        Dense(3),
        Sigmoid()
    ])

    model = Model(net=net, loss=MSE(), optimizer=Adam())
    iterator = BatchIterator(batch_size=args.batch_size)
    for epoch in range(args.num_ep):
        for batch in iterator(train_x, train_y):
            preds = model.forward(batch.inputs)
            loss, grads = model.backward(preds, batch.targets)
            model.apply_grad(grads)

        # evaluate
        preds = net.forward(train_x)
        mse = mean_square_error(preds, train_y)
        print("Epoch %d %s" % (epoch, mse))

        # generate painting
        if epoch % 5 == 0:
            preds = preds.reshape(img_shape[0], img_shape[1], -1)
            preds = (preds * 255.0).astype("uint8")
            name, ext = os.path.splitext(args.img)
            filename = os.path.basename(name)
            out_filename = filename + "-paint-epoch" + str(epoch) + ext
            if not os.path.exists(args.output_dir):
                os.makedirs(args.output_dir)
            out_path = os.path.join(args.output_dir, out_filename)
            Image.fromarray(preds).save(out_path)
            print("save painting to %s" % out_path)
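prepare_dataset is not shown in Examples #6 and #7. Given how its outputs are used (2D inputs regressed onto 3 sigmoid outputs, predictions reshaped to img_shape and rescaled by 255), a plausible sketch is a pixel-coordinates-to-RGB mapping like the one below; this is an assumption for illustration, not the project's actual loader:

import numpy as np
from PIL import Image

def prepare_dataset(img_path):
    # inputs: normalized (row, col) pixel coordinates
    # targets: RGB values scaled to [0, 1]
    img = np.asarray(Image.open(img_path).convert("RGB"), dtype="float32") / 255.0
    h, w, _ = img.shape
    rows, cols = np.mgrid[0:h, 0:w]
    train_x = np.stack([rows.ravel() / h, cols.ravel() / w], axis=1)
    train_y = img.reshape(-1, 3)
    return train_x, train_y, (h, w)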
Example #8
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    model = Model()
    my_model = model.build_model(configs)

    plot_model(my_model, to_file='output/model.png', show_shapes=True)
    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )

    x, y = data.get_train_data(
        configs['data']['sequence_length'],
        configs['data']['normalise']
    )

    print(x.shape)
    print(y.shape)

    print(configs['training']['batch_size'])
    print(configs['model']['save_dir'])
    model.train(x,
                y,
                configs['training']['epochs'],
                configs['training']['batch_size'],
                configs['model']['save_dir']
                )

    x_test, y_test = data.get_test_data(
        configs['data']['sequence_length'],
        configs['data']['normalise']
    )

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    prediction_point = model.predict_point_by_point(x_test)

    # print(prediction_point)
    # print(np.array(predictions).shape)

    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    plot_results(prediction_point, y_test)
Example #9
def main():
    # load parameters
    configs = json.load(open('./data/config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns'],
    )

    # create the RNN model
    model = Model()
    model.build_model(configs)

    # load the training data
    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )
    print(x.shape)
    print(y.shape)

    # train the model
    model.train(
        x,
        y,
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        save_dir=configs['model']['save_dir']
    )

    # test results
    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'],
    )

    # visualize the results
    predictions_multiseq = model.predict_sequences_multiple(
        x_test, configs['data']['sequence_length'],
        configs['data']['sequence_length'])
    predictions_pointbypoint = model.predict_point_by_point(x_test)

    plot_results_multiple(predictions_multiseq, y_test,
                          configs['data']['sequence_length'])
    plot_results(predictions_pointbypoint, y_test)
Example #10
def main():
    # read the required parameters
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])
    # load the data
    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])
    # create the RNN model
    model = Model()
    mymodel = model.build_model(configs)

    plot_model(mymodel, to_file='model.png', show_shapes=True)

    # load the training data
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    print(x.shape)
    print(y.shape)

    # train the model
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'])

    # test results
    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    # show the test results
    predictions = model.predict_sequences_multiple(
        x_test,
        configs['data']['sequence_length'],
        configs['data']['sequence_length'],
        debug=False)
    print(np.array(predictions).shape)

    plot_results_multiple(predictions, y_test,
                          configs['data']['sequence_length'])
Example #11
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
    )

    model = Model()
    model.build_model(configs)

    # get train data
    x, y = data.get_train_data()

    #x=x.squeeze()
    # in-memory training
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'])
    #    # out-of-memory generative training
    #    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    #    model.train_generator(
    #        data_gen=data.generate_train_batch(
    #            batch_size=configs['training']['batch_size'],
    #        ),
    #        epochs=configs['training']['epochs'],
    #        batch_size=configs['training']['batch_size'],
    #        steps_per_epoch=steps_per_epoch,
    #        save_dir=configs['model']['save_dir']
    #    )

    # testing model
    x_test, y_test = data.get_test_data()
    #x_test=x_test.squeeze()

    predictions = model.predict_point_by_point(x_test)

    #   plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    plot_results(predictions, y_test)
Example #12
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    dataframe = pd.read_csv(configs['data']['filename'])
    with open('/Users/yucheng/Downloads/project2/stockIDs.txt', 'r') as f:
        stockIDs = [int(line.split('\n')[0]) for line in f.readlines()]
    for id in stockIDs[377:378]:
        # for id in stockIDs[444:500]:
        print("index: ", stockIDs.index(id))
        data = DataLoader(dataframe, id, configs['data']['train_test_split'],
                          configs['data']['columns'])

        model = Model()
        model.build_model(configs)
        x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                                   normalise=configs['data']['normalise'])
        '''
    	# in-memory training
    	model.train(
    		x,
    		y,
    		epochs = configs['training']['epochs'],
    		batch_size = configs['training']['batch_size'],
    		save_dir = configs['model']['save_dir']
    	)
    	'''
        # out-of-memory generative training
        steps_per_epoch = math.ceil(
            (data.len_train - configs['data']['sequence_length']) /
            configs['training']['batch_size'])
        model.train_generator(id=id,
                              data_gen=data.generate_train_batch(
                                  seq_len=configs['data']['sequence_length'],
                                  batch_size=configs['training']['batch_size'],
                                  normalise=configs['data']['normalise']),
                              epochs=configs['training']['epochs'],
                              batch_size=configs['training']['batch_size'],
                              steps_per_epoch=steps_per_epoch,
                              save_dir=configs['model']['save_dir'])
Example #13
def test_parameters_change(fake_dataset):
    # make sure the parameters change after applying gradients

    # fake dataset
    X, y = fake_dataset
    # simple model
    net = Net([Dense(10), Dense(1)])
    loss = MSE()
    opt = SGD(lr=1.0)
    model = Model(net, loss, opt)

    # forward and backward
    pred = model.forward(X)
    loss, grads = model.backward(pred, y)

    # parameters change test
    params_before = model.net.params.values
    model.apply_grad(grads)
    params_after = model.net.params.values
    for p1, p2 in zip(params_before, params_after):
        assert np.all(p1 != p2)
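The fake_dataset fixture used by this test is not part of the snippet. A minimal pytest fixture that would satisfy it, assuming a small random regression problem, could look like:

import numpy as np
import pytest

@pytest.fixture
def fake_dataset():
    # 100 samples with 10 features and noisy linear targets
    rng = np.random.RandomState(0)
    X = rng.randn(100, 10)
    y = X @ rng.randn(10, 1) + 0.1 * rng.randn(100, 1)
    return X, y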
Example #14
    def make_simple_model(self) -> Model:
        graph = Graph()

        # two inputs
        x = Input(
            'input',
            [1, 5, 5, 3],
            Float32(),
        )

        w = Constant(
            'weight',
            Float32(),
            np.zeros([1, 2, 2, 3]),
            dimension_format='NHWC',
        )

        # Conv
        conv = Conv(
            'conv',
            [1, 4, 4, 1],
            Float32(),
            {'X': x, 'W': w},
            kernel_shape=[2, 2]
        )

        # One output
        y = Output(
            'output',
            [1, 4, 4, 1],
            Float32(),
            {'input': conv}
        )

        # add ops to the graph
        graph.add_op_and_inputs(y)
        model = Model()
        model.graph = graph
        return model
Example #15
def main(args):
    train_set, valid_set, test_set = prepare_dataset(args.data_dir)
    train_x, train_y = train_set
    test_x, test_y = test_set
    train_y = get_one_hot(train_y, 10)

    net = Net([
        Dense(784, 200),
        ReLU(),
        Dense(200, 100),
        ReLU(),
        Dense(100, 70),
        ReLU(),
        Dense(70, 30),
        ReLU(),
        Dense(30, 10)
    ])

    model = Model(net=net,
                  loss=SoftmaxCrossEntropyLoss(),
                  optimizer=Adam(lr=args.lr))

    iterator = BatchIterator(batch_size=args.batch_size)
    evaluator = AccEvaluator()
    loss_list = list()
    for epoch in range(args.num_ep):
        t_start = time.time()
        for batch in iterator(train_x, train_y):
            pred = model.forward(batch.inputs)
            loss, grads = model.backward(pred, batch.targets)
            model.apply_grad(grads)
            loss_list.append(loss)
        t_end = time.time()
        # evaluate
        test_pred = model.forward(test_x)
        test_pred_idx = np.argmax(test_pred, axis=1)
        test_y_idx = np.asarray(test_y)
        res = evaluator.evaluate(test_pred_idx, test_y_idx)
        print("Epoch %d time cost: %.4f\t %s" % (epoch, t_end - t_start, res))
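get_one_hot above is not defined in the snippet; it presumably converts integer class labels into one-hot vectors. A minimal numpy version consistent with that use:

import numpy as np

def get_one_hot(targets, n_classes):
    # index an identity matrix with the integer labels
    return np.eye(n_classes)[np.asarray(targets).reshape(-1)]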
Example #16
def main():
    configs = json.load(open('config.json', 'r'))
    model = Model()
    model.load_model("./saved_models/model2.h5")

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(x_test)

    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    plot_results(predictions, y_test)
Example #17
    def create(
        cls,
        architecture: str,
        dataset: str,
        loss: str,
        optimizer: str,
        metrics: str,
        epochs: int,
        patience: int,
    ) -> Model:
        """
        Builds a deep learning model from a pool of datasets, architectures, and options.
        """
        return Model(
            cls.ARCHITECTURES[architecture],
            cls.DATASETS[dataset],
            cls.LOSSES[loss],
            cls.OPTIMIZERS[optimizer],
            cls.METRICS[metrics],
            epochs,
            patience,
        )
Example #18
    def evaluation(self, model_dir, global_step):
        with tf.Graph().as_default():
            image, label = self.data_input()
            pred = Model(image)
            pred = tf.argmax(pred, axis=1)
            accuracy, update_op = tf.metrics.accuracy(label, pred)
            tf.summary.scalar(self.model + "_accuracy", accuracy)
            merged = tf.summary.merge_all()
            restorer = tf.train.Saver()
            with tf.Session() as sess:
                # sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
                sess.run(tf.local_variables_initializer())
                coord = tf.train.Coordinator()
                threads = tf.train.start_queue_runners(sess, coord)
                restorer.restore(sess, model_dir)
                for _ in range(100):
                    sess.run(update_op)
                accuracy_val, merged_val = sess.run([accuracy, merged])
                self.summary.add_summary(merged_val, global_step)
                coord.request_stop()
                coord.join(threads)
        return accuracy_val
Example #19
def main():
    model_config = json.load(open("config.json", "r"))["model"]
    dataset_path = 'data/'
    # Requirement: Check the presence of the dataset
    if check_dataset(dataset_path):
        configs = json.load(open('config.json', 'r'))
        # 1) Build the model
        model = Model(model_config=model_config)
        model.build_model(configs)
        batch_size, epochs = 4, 30
        cols = configs['training']['cols']
        sequence_length = configs['data']['sequence_length']
        save_dir = "model"
        l = 0
        dataset_path = glob.glob("{}/*.txt".format(dataset_path))
        # 2 ) Loop over the files in the dataset folder
        for filename in dataset_path:
            print("Training {}/{} - {}".format(l, len(dataset_path), filename))
            l += 1
            # 3) Divide the dataset in parts and loop over them
            chunksize = 10**4
            for chunk in pd.read_csv(filename, chunksize=chunksize):
                # 4) Get and prepare data
                data = DataModel()
                x = data.get_train_data(
                    data=[x for x in chunk.get(cols).values.tolist()],
                    seq_len=sequence_length)
                X_train, X_test, y_train, y_test = train_test_split(
                    data.dataX, data.dataY, test_size=0.33)
                print(y_train.shape)
                # 5) Train the model
                model.train(X_train,
                            X_test,
                            y_train,
                            y_test,
                            epochs=epochs,
                            batch_size=batch_size,
                            save_dir=save_dir)
Example #20
def main_plot():

    configs = json.load(open(config_file, 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])
    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'],
                      normalise_meth=configs['data']['normalise'])

    x, y = data.get_test_data(seq_len=configs['data']['sequence_length'],
                              normalise=configs['data']['normalise'])
    model = Model()
    global newest_model
    if newest_model:
        model_way = newest_model
    else:
        model_way = '/home/bf/Documents/Projects/helpplay/HelpPlay/train/LSTM-Neural-Network-for-Time-Series-Prediction/saved_models/10062019-163648-e40.h5'
    model.load_model(model_way)
    print(model.model.evaluate(x, y))
    pre_y = model.predict_point_by_point(x)
    print(x)
    plot_results(pre_y, y)
Example #21
def run():
    wavelet = np.load(DATAPATH['wavelet'])
    seismic = np.load(DATAPATH['seismic'])  # nsample, nline, ncdp
    init_AI = np.load(DATAPATH['init_AI'])  # nsample, nline, ncdp

    for epoch in range(nsim_per_gpu):
        tf.reset_default_graph()  # clear the tensorflow graph

        models = []

        for i in range(ngpu):
            # allocate computing devices for different models
            with tf.device(devices[i]):
                with tf.name_scope('Realization_%d' % (i + epoch * ngpu)):
                    model = Model(wavelet,
                                  seismic,
                                  init_AI,
                                  regularization_weight=regularization_weight,
                                  learning_rate=learning_rate)
                    models.append(model)

        # create a session to run the data flow graph
        with tf.Session() as sess:
            tf.global_variables_initializer().run()
            training_steps = HYPER_PARAS['training_steps']
            for step in range(training_steps):
                sess.run([model.train_step for model in models])

            realizations = sess.run([model.sim_AI for model in models])

            # save the updated realizations
            for i in range(len(realizations)):
                save_path = os.path.join(
                    OUTPUT['results'],
                    'realization_%d.npy' % (epoch * ngpu + i))
                np.save(save_path, realizations[i])
Example #22
def main():
    configs = json.load(open('config.json', 'r'))

    # load the data
    X_train = y_train = X_test = y_test = None  # placeholder: wire up real data here

    # build the model
    # choose one of load_model or build_model
    model = Model()
    # model.load_model(filepath='')
    model.build_model(configs)

    # train the model
    model.train_model(x=X_train,
                      y=y_train,
                      epochs=configs['training']['epochs'],
                      batch_size=configs['training']['batch_size'],
                      save_dir=configs['model']['save_dir'])

    # test the model
    prediction = model.test_model(x=X_test)

    print("Test data true label is: %s" % y_test)
    print("Model output is: %s" % prediction)
Example #23
def main():
    tf.set_random_seed(1231)
    np.random.seed(1231)

    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("Add a config file using '--config file_name.json'")
        exit(1)

    makedirs(config.summary_dir)
    makedirs(config.checkpoint_dir)

    # set logger
    path = os.path.dirname(os.path.abspath(__file__))
    path1 = os.path.join(path, 'core/model.py')
    path2 = os.path.join(path, 'core/train.py')
    logger = get_logger('log',
                        logpath=config.summary_dir + '/',
                        filepath=os.path.abspath(__file__),
                        package_files=[path1, path2])

    logger.info(config)

    # load data
    train_loader, test_loader = load_pytorch(config)

    # define computational graph
    sess = tf.Session()

    model_ = Model(config, _INPUT_DIM[config.dataset],
                   len(train_loader.dataset))
    trainer = Trainer(sess, model_, train_loader, test_loader, config, logger)

    trainer.train()
Example #24
def main(args):
    if args.seed >= 0:
        random_seed(args.seed)

    train_set, valid_set, test_set = prepare_dataset(args.data_dir)
    train_x, train_y = train_set
    test_x, test_y = test_set
    # train_y = get_one_hot(train_y, 2)

    net = Net([Dense(100), ReLU(), Dense(30), ReLU(), Dense(1)])

    model = Model(net=net,
                  loss=SigmoidCrossEntropyLoss(),
                  optimizer=Adam(lr=args.lr))

    iterator = BatchIterator(batch_size=args.batch_size)
    evaluator = AccEvaluator()
    loss_list = list()
    for epoch in range(args.num_ep):
        t_start = time.time()
        for batch in iterator(train_x, train_y):
            pred = model.forward(batch.inputs)
            loss, grads = model.backward(pred, batch.targets)
            model.apply_grad(grads)
            loss_list.append(loss)
        print("Epoch %d time cost: %.4f" % (epoch, time.time() - t_start))
        # evaluate
        model.set_phase("TEST")
        test_y_idx = np.asarray(test_y).reshape(-1)
        test_pred = model.forward(test_x)
        test_pred[test_pred > 0] = 1
        test_pred[test_pred <= 0] = 0
        test_pred_idx = test_pred.reshape(-1)
        res = evaluator.evaluate(test_pred_idx, test_y_idx)
        print(res)
        model.set_phase("TRAIN")
Example #25
    def read(self, pb_path: str, json_path: Optional[str] = None) -> Model:
        """Read ONNX file and load model.

        Parameters
        ----------
        pb_path : str
            Path to ONNX file

        Returns
        -------
        model : Model
            Loaded model

        """
        model = Model()

        # load onnx model
        onnx_model = onnx.load(path.abspath(pb_path))

        # debug print in JSON
        if json_path:
            from pip._internal import main
            main(['install', 'protobuf'])
            from google.protobuf.json_format import MessageToJson, Parse
            js_str = MessageToJson(onnx_model)
            js_obj = json.loads(js_str)
            with open(json_path, 'w') as fw:
                json.dump(js_obj, fw, indent=4)

        # check that it is a valid model
        # onnx.checker.check_model(onnx_model)

        # import graph
        model.graph = Importer.make_graph(onnx_model)

        return model
Example #26
def gradient_check():
    tf.set_random_seed(1231)
    np.random.seed(1231)

    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("Add a config file using '--config file_name.json'")
        exit(1)

    # set logger
    path = os.path.dirname(os.path.abspath(__file__))
    path1 = os.path.join(path, 'core/model.py')
    path2 = os.path.join(path, 'core/train.py')
    logger = get_logger('log',
                        logpath=config.summary_dir + '/',
                        filepath=os.path.abspath(__file__),
                        package_files=[path1, path2])

    logger.info(config)

    batch_sizes = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048]

    precon = False
    for bs in batch_sizes:
        start_time = time.time()
        print("processing batch size {}".format(bs))

        # load data
        train_loader, test_loader = load_pytorch(config)

        # define computational graph
        sess = tf.Session()

        model_ = Model(config, _INPUT_DIM[config.dataset],
                       len(train_loader.dataset))
        trainer = Trainer(sess, model_, train_loader, test_loader, config,
                          logger)

        trainer.grad_check(sess, bs, precon)
        print('batch size {} takes {} secs to finish'.format(
            bs,
            time.time() - start_time))
        tf.reset_default_graph()

    precon = True
    for bs in batch_sizes:
        start_time = time.time()
        print("processing batch size {}".format(bs))

        # load data
        train_loader, test_loader = load_pytorch(config)

        # define computational graph
        sess = tf.Session()

        model_ = Model(config, _INPUT_DIM[config.dataset],
                       len(train_loader.dataset))
        trainer = Trainer(sess, model_, train_loader, test_loader, config,
                          logger)

        trainer.grad_check(sess, bs, precon)
        print('batch size {} takes {} secs to finish'.format(
            bs,
            time.time() - start_time))
        tf.reset_default_graph()
Example #27
def main():
    configs = json.load(open('configcrops.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])

    # in-memory training
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'])

    # Yogyakarta: Kulon progo, bantul, gunung kidul, sleman, DIY
    # Jawa Barat: Bandung, Tasikmalaya, Majalengka, Cirebon, Kuningan, Garut, Sumedang, Cianjut, Subang, Purwakarta, Indramayu
    # Ciamis, Sukabumi, Bogor, Bekasi, Karawang

    # # out-of-memory generative training
    # steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    # model.train_generator(
    #     data_gen=data.generate_train_batch(
    #         seq_len=configs['data']['sequence_length'],
    #         batch_size=configs['training']['batch_size'],
    #         normalise=configs['data']['normalise']
    #     ),
    #     epochs=configs['training']['epochs'],
    #     batch_size=configs['training']['batch_size'],
    #     steps_per_epoch=steps_per_epoch,
    #     save_dir=configs['model']['save_dir']
    # )

    # # save_dir = configs['model']['save_dir']

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    # print(x_test)
    # print(y_test)

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])

    predictions_point = model.predict_point_by_point(x_test)
    print(len(predictions_point))
    plot_results(predictions_point, y_test)

    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    # predictions_full = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    # plot_results(predictions_full, y_test)

    groundtrue = data._groundtruths(1)
    groundtrue = (groundtrue.ravel())
    print(len(groundtrue))

    RMSElist = []
    for i in range(len(groundtrue)):
        errorrate = groundtrue[i] - predictions_point[i]
        hasilkuadrat = errorrate * errorrate
        RMSElist.append(hasilkuadrat)
    RMSE = sum(RMSElist) / (len(predictions_point) - 2)
    RMSE = RMSE**(1 / 2)
    print(RMSE)

    getdataforecast = data._forecasting(5, 1)

    total_prediksi = 5
    takefrom = 5
    forecast_result = model.forecast(total_prediksi, getdataforecast, takefrom)
    # print(forecast_result[0])
    # forecast_result=np.append(forecast_result,[0.0])
    # print(forecast_result)

    n_steps = 8
    # split into samples
    X, y = split_sequence(forecast_result, n_steps)
    # reshape from [samples, timesteps] into [samples, timesteps, features]
    n_features = 1
    # print(X)
    X = X.reshape((X.shape[0], X.shape[1], n_features))
    # define model
    model = Sequential()
    model.add(LSTM(50, activation='relu', input_shape=(n_steps, n_features)))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mse')
    # fit model
    model.fit(X, y, epochs=200, verbose=0)

    # demonstrate prediction
    for j in range(total_prediksi):
        getxlastnumber = array(forecast_result[(-n_steps - 1):-1])
        x_input = getxlastnumber
        # print(x_input)

        x_input = x_input.reshape((1, n_steps, n_features))
        yhat = model.predict(x_input, verbose=0)
        # print(yhat[0][0])

        forecast_result = np.append(forecast_result, yhat[0])
        # prediction_point=np.append(prediction_point,yhat[0])

    plot_results_onlypredicted(forecast_result)
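split_sequence in Example #27 is not defined there. Consistent with its use (windows of n_steps inputs, each with a one-step-ahead target), a common implementation is the following sketch:

import numpy as np

def split_sequence(sequence, n_steps):
    # slide a window of n_steps values over the sequence;
    # each window's target is the value that follows it
    X, y = [], []
    for i in range(len(sequence) - n_steps):
        X.append(sequence[i:i + n_steps])
        y.append(sequence[i + n_steps])
    return np.array(X), np.array(y)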
Example #28

def plot_results_multiple(predicted_data, true_data, prediction_len):
    fig = plt.figure(facecolor='white')
    ax = fig.add_subplot(111)
    ax.plot(true_data, label='True Data')
    # Pad the list of predictions to shift it in the graph to its correct start
    for i, data in enumerate(predicted_data):
        padding = [None for p in range(i * prediction_len)]
        plt.plot(padding + data, label='Prediction')
        plt.legend()
    plt.show()


# %% Build/Train the model
model = Model()

model.build_model(configs)

# out-of-memory generative training
steps_per_epoch = math.ceil(
    (data.len_train - configs['data']['sequence_length']) /
    configs['training']['batch_size'])

model.train_generator(data_gen=data.generate_train_batch(
    seq_len=configs['data']['sequence_length'],
    batch_size=configs['training']['batch_size'],
    normalise=configs['data']['normalise']),
                      epochs=configs['training']['epochs'],
                      batch_size=configs['training']['batch_size'],
                      steps_per_epoch=steps_per_epoch)
Example #29
model_id = configs['model']['model_id']
save_dir = configs['model']['save_dir']

dataloader = DataLoader()
x_scaler_filename = save_dir + "/" + model_id + "-x.scaler"
y_scaler_filename = save_dir + "/" + model_id + "-y.scaler"
dataloader.restore_scalers(x_scaler_filename, y_scaler_filename)

filename = os.path.join('data', configs['data']['filename'])
dataframe = pandas.read_csv(filename, sep=',', encoding='utf-8')
dataframe.index.name = 'fecha'
x_data = dataframe.get(configs['data']['x_cols']).values

in_seq_len = configs['data']['input_sequence_length']
x_data = x_data[:, :]  # keep all rows; narrow this slice to predict on a subset
input_data = dataloader.prepare_input_data(x_data, in_seq_len)
print("Input vector shape: " + str(x_data.shape))

model_filename = sys.argv[2]
model = Model(configs['data']['output_mode'])
model.load_model(filepath=model_filename)

print("Plotting predictions point by point on validation set")
predictions = model.predict_point_by_point(input_data)
print(predictions.shape)
unscaled_predictions = dataloader.recompose_results(predictions[:, 0, :],
                                                    side="y")
plot_results(unscaled_predictions,
             x_data[configs['data']['input_sequence_length']:, :])
Example #30
    cfg = json.load(json_data_file)

# load the MNIST data
f = gzip.open('../data/mnist.pkl.gz', 'rb')
training_set, validation_set, test_set = cPickle.load(f, encoding='latin1')
f.close()

training_label = np.zeros((training_set[1].size, training_set[1].max() + 1))
training_label[np.arange(training_set[1].size), training_set[1]] = 1

validation_label = np.zeros(
    (validation_set[1].size, validation_set[1].max() + 1))
validation_label[np.arange(validation_set[1].size), validation_set[1]] = 1

model = Model(cfg,
              input_layer={
                  "dtype": training_set[0].dtype,
                  "size": 784
              },
              output_layer={
                  "dtype": training_label.dtype,
                  "size": 10
              })
interface = Interface(
    cfg=cfg,
    model=model,
    train=(training_set[0], training_label),
    test=(validation_set[0], validation_label),
)
interface.start()