Code Example #1
# Fragment of the ADDN class (shown in full in Code Example #5). TensorBoard
# comes from keras.callbacks; DataLoader is a project-local data module.
class ADDN():
    def __init__(self, checkpoint_name):
        ### Configurations
        self.config = {
            'data_path': "data",
            'input_shape': (128, 128, 1),
            'output_shape': (128, 128, 1),
            'batch_size': 2,
            'epochs': 100,
            'sample_interval': 200,
            'df': 64,
            'patch': 32
        }
        # Calculate output shape of D (PatchGAN)
        self.img_rows = int(self.config['input_shape'][0])
        self.disc_patch = (self.config['patch'], self.config['patch'], 1)
        self.data_loader = DataLoader(dataset=self.config['data_path'])
        self.checkpoint_name = checkpoint_name

        self.generator = None
        self.discriminator = None
        self.combined = None
        self.imgs_trn = None
        self.msks_trn = None
        self.imgs_val = None
        self.msks_val = None
        log_path = 'Graph/addn'
        self.callback = TensorBoard(log_path)
Code Example #2
import datetime as dt
import json
import os

import numpy as np

# DataLoader, Model and plot are assumed project-local modules.

def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])
    batch_size = configs['training']['batch_size']
    data = DataLoader(os.path.join('data', configs['data']['filename']))

    model = Model()

    model.build_model(configs)

    x_train, y_train, x_val, y_val, x_test, y_test = data.get_data(configs)

    start_dt = dt.datetime.now()
    estimator = model.train(x_train,
                            y_train,
                            x_val,
                            y_val,
                            epochs=configs['training']['epochs'],
                            batch_size=batch_size)
    end_dt = dt.datetime.now()
    print('Time taken to train model: %s' % (end_dt - start_dt))

    #print(estimator.history.keys())

    plot(estimator.history['acc'],
         estimator.history['val_acc'],
         label1='Train accuracy',
         label2='Validation accuracy')
    plot(estimator.history['loss'],
         estimator.history['val_loss'],
         label1='Train loss',
         label2='Validation loss')

    predicted_prob = model.predict(x_test)

    prediction = np.where(predicted_prob >= 0.5, 1, 0)

    misclassified = np.sum(np.squeeze(prediction) != np.squeeze(y_test))
    print('We got', misclassified, 'misclassified cases out of',
          len(y_test), 'total cases.')
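`plot` is not defined in this snippet; a minimal matplotlib sketch consistent with the call sites above (only `label1`/`label2` are taken from the source, the rest is assumed):

import matplotlib.pyplot as plt

def plot(series1, series2, label1='Series 1', label2='Series 2'):
    # Hypothetical helper: draw two per-epoch metric curves on one figure.
    plt.figure()
    plt.plot(series1, label=label1)
    plt.plot(series2, label=label2)
    plt.xlabel('Epoch')
    plt.legend()
    plt.show()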
Code Example #3
import json
import os

import numpy as np

# DataLoader, Model and plot are assumed project-local modules.

def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()

    model.build_model(configs)

    x_test, y_test = data.get_data(seq_len=configs['data']['sequence_length'],
                                   train=False)

    x, y = data.get_data(seq_len=configs['data']['sequence_length'],
                         train=True)

    estimator = model.train(x,
                            y,
                            x_test,
                            y_test,
                            epochs=configs['training']['epochs'],
                            batch_size=configs['training']['batch_size'],
                            save_dir=configs['model']['save_dir'])

    #plot(estimator.history['acc'], estimator.history['val_acc'], label1='Train acc', label2='Test acc')
    plot(estimator.history['loss'],
         estimator.history['val_loss'],
         label1='Train loss',
         label2='Test loss')

    predictions = model.predict(x_test)

    predictions = data.inverse_normalize(predictions)
    y_test = data.inverse_normalize(np.squeeze(y_test))

    future_prediction = model.future_prediction(
        x_test, configs['data']['sequence_length'],
        configs['data']['future_period'])
    future_prediction = data.inverse_normalize(future_prediction)
    future_prediction = np.concatenate((predictions, future_prediction),
                                       axis=None)

    plot(future_prediction, y_test, label1='Prediction', label2='True values')
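`DataLoader.inverse_normalize` is project code; a plausible stand-in, assuming `get_data` applied min-max scaling and that the fitted range is available (both are assumptions, not confirmed by the source):

import numpy as np

def inverse_normalize(data, data_min, data_max):
    # Hypothetical: undo x_norm = (x - data_min) / (data_max - data_min).
    return np.asarray(data) * (data_max - data_min) + data_min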
Code Example #4
import os
import pickle

import numpy as np
from sklearn.decomposition import PCA
from sklearn.ensemble import AdaBoostRegressor

# DataLoader is an assumed project-local data module.

N = 10
seq_len = 50
sample_gap = 50

def AdaRegress(X, y, n_estimators, random_st):
    regr = AdaBoostRegressor(n_estimators=n_estimators, random_state=random_st)
    regr.fit(X, y)
    return regr

if __name__ == '__main__':
    if os.path.exists(path="./indicator_data/mydata_{0}_{1}_{2}.pickle".format(N, seq_len, sample_gap)):
        with open("./indicator_data/mydata_{0}_{1}_{2}.pickle".format(N, seq_len, sample_gap), 'rb') as load_data:
            (train_input, train_label), (dev_input, dev_label), (test_input, test_label) = pickle.load(load_data)
    else:
        (train_input, train_label), (dev_input, dev_label), (test_input, test_label) = DataLoader("./data.csv", N, seq_len, sample_gap)
        with open("./indicator_data/mydata_{0}_{1}_{2}.pickle".format(N, seq_len, sample_gap), 'wb') as save_data:
            data_list = [(train_input, train_label), (dev_input, dev_label), (test_input, test_label)]
            pickle.dump(data_list, save_data)

    pca = PCA(n_components=540)
    train_input = train_input.reshape(train_input.shape[0], -1)
    print(train_input.shape)
    dev_input = dev_input.reshape(dev_input.shape[0], -1)
    test_input = test_input.reshape(test_input.shape[0], -1)
    # Fit the projection on the training split only, then apply the same
    # transform to dev and test so all splits share one feature space.
    train_input = pca.fit_transform(train_input)
    dev_input = pca.transform(dev_input)
    test_input = pca.transform(test_input)

    regressor = AdaRegress(train_input, np.array(train_label), 100, 0)
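The snippet stops after fitting; a short follow-up evaluating the regressor on the held-out splits (the metric choice is ours, not the source's):

from sklearn.metrics import mean_squared_error

dev_pred = regressor.predict(dev_input)
test_pred = regressor.predict(test_input)
print('dev MSE: ', mean_squared_error(dev_label, dev_pred))
print('test MSE:', mean_squared_error(test_label, test_pred))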
Code Example #5
import datetime

import numpy as np
from keras.callbacks import TensorBoard
from keras.layers import Input
from keras.models import Model
from keras.optimizers import Adam

# build_discriminator, get_densenet, dice_coef_loss, write_log, imsave and
# DataLoader are assumed project-local helpers.


class ADDN():
    def __init__(self, checkpoint_name):
        ### Configurations
        self.config = {
            'data_path': "data",
            'input_shape': (128, 128, 1),
            'output_shape': (128, 128, 1),
            'batch_size': 2,
            'epochs': 100,
            'sample_interval': 200,
            'df': 64,
            'patch': 32
        }
        # Calculate output shape of D (PatchGAN)
        self.img_rows = int(self.config['input_shape'][0])
        self.disc_patch = (self.config['patch'], self.config['patch'], 1)
        self.data_loader = DataLoader(dataset=self.config['data_path'])
        self.checkpoint_name = checkpoint_name

        self.generator = None
        self.discriminator = None
        self.combined = None
        self.imgs_trn = None
        self.msks_trn = None
        self.imgs_val = None
        self.msks_val = None
        log_path = 'Graph/addn'
        self.callback = TensorBoard(log_path)

    @property
    def checkpoint_path(self):
        return 'checkpoints/%s' % (self.checkpoint_name)

    def compile(self):
        optimizer = Adam(0.0002, 0.5)
        # Build and compile the discriminator
        self.discriminator = build_discriminator(self.config['input_shape'],
                                                 self.config['df'])
        self.discriminator.compile(loss='mse',
                                   optimizer=optimizer,
                                   metrics=['accuracy'])
        # Build the generator
        self.generator = get_densenet(self.config['input_shape'])
        #self.generator =get_generator(self.config['input_shape'])
        img = Input(shape=self.config['input_shape'])
        label = Input(shape=self.config['input_shape'])
        seg = self.generator(img)
        # Freeze the discriminator inside the combined model so that only
        # the generator is updated through the adversarial loss.
        self.discriminator.trainable = False
        valid = self.discriminator([seg, img])
        self.combined = Model(inputs=[label, img], outputs=[valid, seg])
        self.combined.compile(loss=['mse', dice_coef_loss],
                              loss_weights=[1, 100],
                              optimizer=optimizer)
        self.callback.set_model(self.generator)

    def train(self, sample=False):
        start_time = datetime.datetime.now()
        # Adversarial loss ground truths
        valid = np.ones((self.config['batch_size'], ) + self.disc_patch)
        fake = np.zeros((self.config['batch_size'], ) + self.disc_patch)
        for epoch in range(self.config['epochs']):
            for batch_i, (imgs, labels) in enumerate(
                    self.data_loader.load_batch(self.config['batch_size'])):
                # Condition on B and generate a translated version

                # Train the discriminators (original images = real / generated = Fake)
                segs = self.generator.predict(imgs)
                d_loss_real = self.discriminator.train_on_batch([labels, imgs],
                                                                valid)
                d_loss_fake = self.discriminator.train_on_batch([segs, imgs],
                                                                fake)
                d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
                # Train the generators
                g_loss = self.combined.train_on_batch([labels, imgs],
                                                      [valid, labels])
                elapsed_time = datetime.datetime.now() - start_time
                # Plot the progress
                print(
                    "[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %3d%%] [G loss: %f] time: %s"
                    % (epoch, self.config['epochs'], batch_i,
                       self.data_loader.n_batches, d_loss[0], 100 * d_loss[1],
                       g_loss[0], elapsed_time))
                # Checkpoint the generator after every batch.
                self.generator.save_weights('%s/weights_loss_trn.weights' %
                                            self.checkpoint_path)

                # If at save interval => save generated image samples
                # if sample == True:
                #     if batch_i % self.config['sample_interval'] == 0:
                #         self.sample_images(epoch, batch_i)
            train_names = 'train_loss'
            write_log(self.callback, train_names, g_loss[4], epoch)

            imgs, labels = self.data_loader.load_img(2)
            g_val = self.combined.test_on_batch([labels, imgs],
                                                [valid, labels])
            val_names = 'val_acc'
            write_log(self.callback, val_names, g_val[4], epoch)

    def predict(self, imgs):
        return self.generator.predict(imgs)

    def sample_images(self, epoch, batch_i):
        # r, c = 3, 3
        imgs, labels = self.data_loader.load_img(batch_size=1)
        segs = self.predict(imgs)
        print(segs.shape)
        imsave("checkpoints/ADDN/images/%d_%d.tif" % (epoch, batch_i),
               segs[0][:, :, 0])
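Neither `write_log` nor `dice_coef_loss` appears in the snippet. Both are common Keras/TF1-era helpers; the sketches below are plausible reconstructions under that assumption, not the project's confirmed code:

import tensorflow as tf
from keras import backend as K

def write_log(callback, name, value, step):
    # Write one scalar to the TensorBoard writer held by a Keras
    # TensorBoard callback (TF1-style summaries).
    summary = tf.Summary()
    summary_value = summary.value.add()
    summary_value.simple_value = value
    summary_value.tag = name
    callback.writer.add_summary(summary, step)
    callback.writer.flush()

def dice_coef(y_true, y_pred, smooth=1.0):
    # Soft Dice coefficient over flattened masks.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    # Minimising 1 - Dice maximises overlap between prediction and label.
    return 1.0 - dice_coef(y_true, y_pred)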
Code Example #6
import os
import pickle

import numpy as np
import pandas as pd

# DataLoader and get_ft_features are assumed project-local helpers.

def generate_by_ft(N, seq_len, sample_gap):
    """Generate features with featuretools."""
    if os.path.exists(path="./raw_data/mydata_{0}_{1}_{2}.pickle".format(N, seq_len, sample_gap)):
        with open("./raw_data/mydata_{0}_{1}_{2}.pickle".format(N, seq_len, sample_gap), 'rb') as load_data:
            (train_input, train_label), (dev_input, dev_label), (test_input, test_label) = pickle.load(load_data)
    else:
        (train_input, train_label), (dev_input, dev_label), (test_input, test_label) = DataLoader("./data.csv", N, seq_len, sample_gap)
        with open("./raw_data/mydata_{0}_{1}_{2}.pickle".format(N, seq_len, sample_gap), 'wb') as save_data:
            data_list = [(train_input, train_label), (dev_input, dev_label), (test_input, test_label)]
            pickle.dump(data_list, save_data)
    train_input = np.array(train_input).reshape(len(train_input), seq_len, -1)
    train_label = np.array(train_label)
    dev_input = np.array(dev_input).reshape(len(dev_input), seq_len, -1)
    dev_label = np.array(dev_label)
    test_input = np.array(test_input).reshape(len(test_input), seq_len, -1)
    test_label = np.array(test_label)
    print(train_input.shape, dev_input.shape, test_input.shape)
    train_df = pd.DataFrame(train_input.reshape(-1, train_input.shape[2]))
    dev_df = pd.DataFrame(dev_input.reshape(-1, dev_input.shape[2]))
    test_df = pd.DataFrame(test_input.reshape(-1, test_input.shape[2]))
    columns = ['midprice', 'lastprice', 'volume', 'lastvolume', 'turnover',
               'lastturnover', 'upper', 'lower',
               'askprice5', 'askprice4', 'askprice3', 'askprice2', 'askprice1',
               'bidprice1', 'bidprice2', 'bidprice3', 'bidprice4', 'bidprice5',
               'askvolume5', 'askvolume4', 'askvolume3', 'askvolume2', 'askvolume1',
               'bidvolume1', 'bidvolume2', 'bidvolume3', 'bidvolume4', 'bidvolume5']
    train_df.columns = columns
    dev_df.columns = columns
    test_df.columns = columns

    # Column groups: 0:8 scalar fields, 8:13 ask prices, 13:18 bid prices,
    # 18:23 ask volumes, 23:28 bid volumes.
    bidvolume = train_df.iloc[:, 23:]
    bidvolume_dev = dev_df.iloc[:, 23:]
    bidvolume_test = test_df.iloc[:, 23:]

    askvolume = train_df.iloc[:, 18:23]
    askvolume_dev = dev_df.iloc[:, 18:23]
    askvolume_test = test_df.iloc[:, 18:23]

    bidprice = train_df.iloc[:, 13:18]
    bidprice_dev = dev_df.iloc[:, 13:18]
    bidprice_test = test_df.iloc[:, 13:18]

    askprice = train_df.iloc[:, 8:13]
    askprice_dev = dev_df.iloc[:, 8:13]
    askprice_test = test_df.iloc[:, 8:13]

    others = train_df.iloc[:, 0:8]
    others_dev = dev_df.iloc[:, 0:8]
    others_test = test_df.iloc[:, 0:8]

    train_input = get_ft_features(askprice, bidprice, askvolume, bidvolume, others)
    dev_input = get_ft_features(askprice_dev, bidprice_dev, askvolume_dev, bidvolume_dev, others_dev)
    test_input = get_ft_features(askprice_test, bidprice_test, askvolume_test, bidvolume_test, others_test)
    print(train_input.shape, dev_input.shape, test_input.shape)

    with open("./raw_data/ftdata.pickle".format(N, seq_len, sample_gap), 'wb') as save_data:
        data_list = [(train_input, train_label), (dev_input, dev_label), (test_input, test_label)]
        pickle.dump(data_list, save_data)
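`get_ft_features` is project code built on featuretools DFS and is not shown. As a hypothetical stand-in, a few hand-rolled order-book features convey the idea without the featuretools dependency (all names and formulas below are ours):

import pandas as pd

def get_ft_features(askprice, bidprice, askvolume, bidvolume, others):
    # Hypothetical stand-in: derive simple order-book features directly
    # instead of running featuretools DFS.
    feats = pd.DataFrame(index=others.index)
    # Best ask (askprice1, last column) minus best bid (bidprice1, first column).
    feats['spread'] = askprice.iloc[:, -1] - bidprice.iloc[:, 0]
    bid_total = bidvolume.sum(axis=1)
    ask_total = askvolume.sum(axis=1)
    feats['imbalance'] = (bid_total - ask_total) / (bid_total + ask_total)
    feats['depth'] = bid_total + ask_total
    return pd.concat([others, feats], axis=1).to_numpy()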
Code Example #7
import os
import pickle

# DataLoader and wavelet_denoising are assumed project-local helpers.

def generate_by_wavelet(N, seq_len, sample_gap):
    """Generate features by wavelet denoising."""
    if os.path.exists(path="./raw_data/mydata_{0}_{1}_{2}.pickle".format(N, seq_len, sample_gap)):
        with open("./raw_data/mydata_{0}_{1}_{2}.pickle".format(N, seq_len, sample_gap), 'rb') as load_data:
            (train_input, train_label), (dev_input, dev_label), (test_input, test_label) = pickle.load(load_data)
    else:
        (train_input, train_label), (dev_input, dev_label), (test_input, test_label) = DataLoader("./data.csv", N, seq_len, sample_gap)
        with open("./raw_data/mydata_{0}_{1}_{2}.pickle".format(N, seq_len, sample_gap), 'wb') as save_data:
            data_list = [(train_input, train_label), (dev_input, dev_label), (test_input, test_label)]
            pickle.dump(data_list, save_data)

    for i in range(train_input.shape[2]):
        train_input[:, :, i] = wavelet_denoising(train_input[:, :, i])
        dev_input[:, :, i] = wavelet_denoising(dev_input[:, :, i])
        test_input[:, :, i] = wavelet_denoising(test_input[:, :, i])

    return train_input, dev_input, test_input
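`wavelet_denoising` is also project code. A common approach with PyWavelets is to soft-threshold the detail coefficients of each series; a sketch under that assumption (wavelet choice and threshold rule are ours, not the source's):

import numpy as np
import pywt

def wavelet_denoising(x, wavelet='db4', level=2):
    # Denoise each row (one sample's time series) independently:
    # decompose, soft-threshold the detail coefficients, reconstruct.
    x = np.atleast_2d(np.asarray(x, dtype=float))
    out = np.empty_like(x)
    for i, row in enumerate(x):
        coeffs = pywt.wavedec(row, wavelet, level=level)
        # Estimate the noise scale from the finest detail band.
        sigma = np.median(np.abs(coeffs[-1])) / 0.6745
        thresh = sigma * np.sqrt(2 * np.log(row.size))
        coeffs[1:] = [pywt.threshold(c, thresh, mode='soft') for c in coeffs[1:]]
        out[i] = pywt.waverec(coeffs, wavelet)[:row.size]
    return out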