def setUp(self):
        # Need this for Resource Auth
        App.get_config()['SECRET_KEY'] = 'mysecrettestkey'

        self.db = DB
        # do once per test
        load_data('%s/data/fixtures.json' % TEST_DIR)
Example #2
    def train(self):
        train_data, test_data, n_labels = models.load_data(self.args)
        n_epochs = self.args['training']['n_epochs']
        batch_size = self.args['training']['batch_size']
        d_steps = self.args['training']['d_steps']
        log_interval = self.args['training']['log_interval']
        self.writer = torch.utils.tensorboard.SummaryWriter(
            log_dir=self.log_dir + '/run/')

        # fixed latents/labels for periodic sample grids, plus slerp
        # endpoints for latent-space interpolation
        self.fixed_z = self.z_dist.sample((64, ))
        self.fixed_y = self.y_dist.sample((64, ))
        self.z_start = self.z_dist.sample((32, ))
        self.z_end = self.z_dist.sample((32, ))
        self.z_inter_list = metrics.slerp(self.z_start, self.z_end, 14)

        for eidx in trange(n_epochs, leave=True, desc='Epoch'):
            for iidx, real_batch in enumerate(tqdm(train_data)):
                noise_batch = self.z_dist.sample((batch_size, ))
                # discriminator step on a real batch plus fresh noise
                loss_d = self._train_d(real_batch=real_batch,
                                       noise_batch=noise_batch)
                self.writer.add_scalar('Loss/%d/d' % eidx,
                                       loss_d,
                                       global_step=iidx)

                if iidx % d_steps == 0:
                    # generator step every d_steps discriminator updates
                    if len(real_batch) == 2:
                        label_batch = real_batch[1]
                    else:
                        label_batch = None
                    loss_g = self._train_g(noise_batch=noise_batch,
                                           label_batch=label_batch)
                    self.writer.add_scalar('Loss/%d/g' % eidx,
                                           loss_g,
                                           global_step=iidx)
            if eidx % log_interval == 0:
                self._log(eidx, test_data)
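The `metrics.slerp` call above interpolates between the two latent batches along the great circle rather than a straight line, which tends to keep intermediate samples on the prior's typical set. As a rough sketch of what such a helper computes (an illustrative assumption, not this project's actual implementation), assuming equal-shape `(batch, dim)` torch tensors:

import torch

def slerp(z_start, z_end, n_steps):
    # Spherical linear interpolation between two latent batches.
    # Hypothetical stand-in for metrics.slerp; assumes nonzero vectors
    # that are not exactly parallel (sin(omega) != 0).
    a = z_start / z_start.norm(dim=1, keepdim=True)
    b = z_end / z_end.norm(dim=1, keepdim=True)
    omega = torch.acos((a * b).sum(dim=1, keepdim=True).clamp(-1.0, 1.0))
    return [
        (torch.sin((1 - t) * omega) * z_start
         + torch.sin(t * omega) * z_end) / torch.sin(omega)
        for t in torch.linspace(0, 1, n_steps)
    ]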
def load_and_report():

    # fetch the current time to use as the figure/file name
    name = names_of_fig()

    # load pricing test data
    all_contracts, p_sorted = load_data()

    # List contracts
    print(f"Showing all_contracts..........\n{all_contracts.tail(10)}\n")
    print(f"Showing p_sorted......\n{p_sorted.tail(10)}\n")

    # output from potential_pair
    ret, list_sect = potential_pairs(all_contracts, p_sorted)

    print(f"showing list_sect.......\n{list_sect}\n")
    print(f"showing ret........\n{ret.tail(10)}\n")

    # show the results of in-sample testing
    ret.iloc[0] = 1
    ret.index = all_contracts.index
    plt.figure(figsize=(15, 7))
    plt.xlabel('Trade Date')
    plt.grid(True)
    plt.plot(ret)
    plt.legend(list(ret.columns))
    # save before show(): show() clears the figure in most backends,
    # so calling savefig afterwards would write a blank image
    plt.savefig(os.path.join("charts/sample test", "sample test " + name))
    plt.show()

    # calculate the performance statistics
    perf = ret.calc_stats()
    perf.display()
    perf.to_csv(sep=',', path="train_perf.csv")

    # plot the maximum drawdown of each pair
    ffn_ret = ffn.to_drawdown_series(ret)

    plt.figure(figsize=(15, 7))
    plt.grid(True)
    plt.plot(ffn_ret)
    plt.legend(list(ffn_ret.columns))
    plt.savefig(os.path.join("charts/ffn drawdown", "ffn max drawdown " + name))
    plt.show()

    # in-sample back test of the equal-weighted portfolio
    port = ret.mean(axis=1)
    plt.figure(figsize=(15, 7))
    plt.grid(True)
    plt.plot(port)
    # plt.legend(list(port.columns))  # port is a Series, so no column legend
    plt.savefig(os.path.join("charts/testing of portfolio", "portfolio test " + name))
    plt.show()

    perf = port.calc_stats()

    print(f"\n\nPrinting perf stats.......\n{perf.stats}\n")

    # in-sample maximum drawdown of the portfolio
    ffn_port = ffn.to_drawdown_series(port)
    plt.figure(figsize=(15, 7))
    plt.grid(True)
    plt.plot(ffn_port)
    # plt.legend(list(ffn_port.columns))
    plt.savefig(os.path.join("charts/back testing of portfolio maximum drawdown", "maximum drawdown " + name))

    ##########################
    ##  Sample back testing ##
    ##########################
    test_ret, testing_data = sample_backtest(list_sect)

    print(f"\n\n\nShowing sample back testing - testing data tail.......\n\n{testing_data.tail(3)}\n")

    test_ret.iloc[0] = 1
    print(f"\n\nShowing sample backtesting test_ret tail.......\n\n{test_ret.tail(3)}")
    print(f"\n\nshowing test_ret shape......\n\n{test_ret.shape}")
    print(f"\n\n\nshowing test_ret index........\n\n{test_ret.index}")

    # plot test_ret from the sample back test on a fresh figure
    plt.figure(figsize=(15, 7))
    plt.plot(test_ret)
    plt.legend(list(test_ret.columns))
    plt.savefig(os.path.join("charts/sample Backtesting/test_ret", "test_ret " + name))

    # out-of-sample back test of the portfolio

    port = test_ret.mean(axis=1)
    plt.figure(figsize=(15, 7))
    plt.grid(True)
    plt.plot(port)

    # plt.legend(list(port.columns))
    plt.savefig(os.path.join("charts/sample Backtesting/portfolio", "backtesting portfolio " + name))

    perf = port.calc_stats()
    print(perf.stats)

    ffn_backtest_sample = ffn.to_drawdown_series(port)
    plt.figure(figsize=(15, 7))
    plt.grid(True)
    plt.plot(ffn_backtest_sample)

    # plt.legend(list(ffn_backtest_sample.columns))
    plt.savefig(os.path.join("charts/sample Backtesting/ffn_drawdown_port", "drawdown_port " + name))

    return None
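For context on `ffn.to_drawdown_series` used above: a drawdown series measures each value against the running peak so far. A minimal pandas equivalent (a sketch of the concept, not ffn's exact code):

import pandas as pd

def drawdown_series(prices):
    # Ratio of the current value to the running maximum, minus 1:
    # 0 at new highs, negative while under water.
    return prices / prices.cummax() - 1.0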
    # parser.add_argument('patch_shape', type=int,
    #                     help='The size of the input patch window.')
    # parser.add_argument('label_patch_shape', type=int,
    #                     help='The size of the predicted patch window.')
    # parser.add_argument('num_channels', type=int,
    #                     help='Number of channels in the dataset.')
    # args = parser.parse_args()
    # path_testset = self.testset

    path_testset = '/home/local/USHERBROOKE/havm2701/data/DBFrames'
    batch_size = 100
    dataset = '/home/local/USHERBROOKE/havm2701/git.repos/Deep_VAMP/theano/mnist.pkl.gz'
    datasets = models.load_data(dataset)
    pdb.set_trace()
    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    pdb.set_trace()

    #train = VAMP(start=0,stop=10000,image_resize=[128,64],toronto_prepro=True)
    #valid = VAMP(start=10000,stop=12000,image_resize=[128,64],toronto_prepro=True)
    #valid = valid.get_reshaped_images()
    #train = train.get_reshaped_images()
    dataset = {}
    #train.y = np.argmax(train.y, axis=1)
    #train.y = np.asarray(train.y, dtype=np.int32)
    #valid.y = np.argmax(valid.y, axis=1)
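The `mnist.pkl.gz` file loaded above is the classic pickled 3-tuple of (train, valid, test) splits, each an `(X, y)` pair. A minimal loader sketch for that layout (an assumption about what `models.load_data` does, which may do more):

import gzip
import pickle

def load_mnist_splits(path):
    # The classic Theano-era mnist.pkl.gz layout: a pickled 3-tuple of
    # (train, valid, test), each an (X, y) pair of numpy arrays.
    with gzip.open(path, 'rb') as f:
        train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
    return [train_set, valid_set, test_set]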
import numpy as np
import PIL.Image as pil
import matplotlib.pyplot as plt
import random as rng
import cv2 as cv
from models import load_data, view_image


print(__doc__)


data_dir = 'data/'
train_data, train_labels, sub = 'train_images.pkl', 'train_labels.csv', 'test_images.pkl'

# Load the data into variables and normalize data
X, y, sub = load_data(data_dir, train_data, train_labels, sub)

rng.seed(12345)

# Exercise some OpenCV contour utilities to see if they work
def thresh_callback(val):
    # `src_gray` is assumed to be a module-level grayscale uint8 image
    threshold = val
    canny_output = cv.Canny(src_gray, threshold, threshold*2)
    # OpenCV >= 4 returns (contours, hierarchy); OpenCV 3 returned a 3-tuple
    contours, hierarchy = cv.findContours(canny_output, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)

    # Bounding rectangles and enclosing circles, one per contour
    contours_poly = [None]*len(contours)
    boundRect = [None]*len(contours)
    centers = [None]*len(contours)
    radius = [None]*len(contours)
    for i, c in enumerate(contours):
        contours_poly[i] = cv.approxPolyDP(c, 3, True)
        boundRect[i] = cv.boundingRect(contours_poly[i])
        centers[i], radius[i] = cv.minEnclosingCircle(contours_poly[i])
    return contours_poly, boundRect, centers, radius
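A callback like this is usually driven by a trackbar. A hypothetical driver follows; the window and trackbar names, and the use of `X[0]` as the source image, are illustrative assumptions:

src_gray = cv.blur(X[0].astype(np.uint8), (3, 3))  # global consumed by the callback
cv.namedWindow('Contours')
cv.createTrackbar('Canny thresh:', 'Contours', 100, 255, thresh_callback)
thresh_callback(100)
cv.waitKey()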
def get_wheel_sets():
    return load_data()
Example #7
def setUp(self):
        self.db = DB
        # do once per test
        load_data('%s/data/minimal.json' % TEST_DIR)
Example #8
'''
@author: viet
Prototyping the thinning pipeline; we might not end up using it, though.
'''
from models import load_data, view_image
from models.img_processing import threshold_background
import numpy as np

import cv2 as cv
from PIL import Image

train_data, train_labels, sub_data = load_data('data/', 'train_images.pkl',
                                               'train_labels.csv',
                                               'test_images.pkl')

train_labels = train_labels['Category'].values  # Get labels

# add a channel axis, then move it to the front: (N, H, W) -> (N, 1, H, W)
train_data, sub_data = train_data[:, :, :, None], sub_data[:, :, :, None]
train_data, sub_data = np.transpose(train_data, (0, 3, 1, 2)), np.transpose(
    sub_data, (0, 3, 1, 2))


def convert_to_3_channels(img_array):
    'Repeat the single channel three times to get a 3-channel image'
    new_array = []
    for i in range(len(img_array)):
        e = img_array[i][0]  # (H, W) slice of a (1, H, W) image
        new_image = [e, e, e]  # stack into 3 identical channels
        new_array.append(new_image)
    return np.asarray(new_array)
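The loop above is easy to vectorize: for `(N, 1, H, W)` arrays, repeating along the channel axis gives the same result. A sketch, assuming that input shape:

def convert_to_3_channels_vectorized(img_array):
    # Same result as the loop above for (N, 1, H, W) input:
    # repeat the single channel 3 times along axis 1 -> (N, 3, H, W).
    return np.repeat(img_array, 3, axis=1)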