Example #1
    def generate_paths(self, fixed_seed=True, day_count=365.):
        if self.time_grid is None:
            self.generate_time_grid()
        M = len(self.time_grid)
        I = self.paths
        paths = np.zeros((M, I))
        paths_ = np.zeros_like(paths)
        paths[0] = self.initial_value
        paths_[0] = self.initial_value
        if self.correlated is False:
            rand = sn_random_numbers((1, M, I), fixed_seed=fixed_seed)
        else:
            rand = self.random_numbers
        for t in range(1, len(self.time_grid)):
            # year fraction for this step, based on the given day count
            dt = (self.time_grid[t] - self.time_grid[t - 1]).days / day_count
            if self.correlated is False:
                ran = rand[t]
            else:
                ran = np.dot(self.cholesky_matrix, rand[:, t, :])
                ran = ran[self.rn_set]
            # full truncation Euler scheme for the square-root diffusion
            paths_[t] = (paths_[t - 1] + self.kappa *
                         (self.theta - np.maximum(0, paths_[t - 1, :])) * dt +
                         np.sqrt(np.maximum(0, paths_[t - 1, :])) *
                         self.volatility * np.sqrt(dt) * ran)
            paths[t] = np.maximum(0, paths_[t])
        self.instrument_values = paths
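For reference, a minimal standalone sketch of the same full-truncation Euler scheme, with a constant time step and illustrative parameter values (all names and numbers below are assumptions, not from the source):

import numpy as np

# illustrative CIR parameters (assumed values)
kappa, theta, sigma, x0 = 3.0, 0.02, 0.1, 0.03
M, I, dt = 50, 10000, 1.0 / 50  # steps, paths, constant year fraction

rng = np.random.default_rng(1000)
paths_ = np.zeros((M + 1, I))
paths = np.zeros_like(paths_)
paths_[0] = paths[0] = x0
for t in range(1, M + 1):
    ran = rng.standard_normal(I)
    prev = np.maximum(0, paths_[t - 1])  # full truncation
    paths_[t] = (paths_[t - 1] + kappa * (theta - prev) * dt +
                 np.sqrt(prev) * sigma * np.sqrt(dt) * ran)
    paths[t] = np.maximum(0, paths_[t])
print(paths[-1].mean())  # long-run level drifts toward theta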
Example #2
def train(epoch):
    model.train()

    train_loss = 0
    temperature = opt.temperature
    for bi, (data, _) in enumerate(train_loader):
        data = data.to(device)

        optimizer.zero_grad()

        recon_output, encode_output = model(data, temperature)

        loss = loss_function(recon_output, data, encode_output)

        loss.backward()

        train_loss += loss.item()

        optimizer.step()

        if (bi + 1) % 100 == 0:
            temperature = np.maximum(temperature * np.exp(-ANNEAL_RATE * bi),
                                     temperature_min)

        if (bi + 1) % opt.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, bi * len(data), len(train_loader.dataset),
                100. * bi / len(train_loader), loss.item() / len(data)))

    # report the epoch average once the batch loop is done
    print('====> Epoch: {} Average loss: {:.4f}'.format(
        epoch, train_loss / len(train_loader.dataset)))
Example #3
def test(epoch):
    model.eval()
    test_loss = 0
    temperature = opt.temperature
    with torch.no_grad():
        for bi, (data, _) in enumerate(test_loader):
            data = data.to(device)

            recon_output, encode_output = model(data, temperature)
            test_loss += loss_function(recon_output, data,
                                       encode_output)[0].item()

            if (bi + 1) % 100 == 0:
                temperature = np.maximum(
                    temperature * np.exp(-ANNEAL_RATE * bi), temperature_min)

                # save a side-by-side grid of inputs and reconstructions
                n = min(data.size(0), 8)
                comparison = torch.cat([
                    data[:n],
                    recon_output.view(opt.batch_size, 1, 28, 28)[:n]
                ])
                save_image(comparison.cpu(),
                           'results/reconstruction_' + str(epoch) + '.png',
                           nrow=n)

    test_loss /= len(test_loader.dataset)
    print('====> Test set loss: {:.4f}'.format(test_loss))
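A minimal driver for these two functions might look like the sketch below, assuming an opt.epochs field alongside the other opt settings the functions already use:

for epoch in range(1, opt.epochs + 1):  # opt.epochs is an assumed field
    train(epoch)
    test(epoch)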
Example #4
import numpy as np


def compute_ap(recall, precision):
    '''
    Compute the average precision, given the recall and precision curves.
    # Arguments:
        recall: list
        precision: list
    '''
    # correct AP calculation: pad the curves at both ends
    mrec = np.concatenate(([0.0], recall, [1.0]))
    mpre = np.concatenate(([0.0], precision, [0.0]))

    # compute the precision envelope (make precision monotonically decreasing)
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

    # to calculate the area under the PR curve, look for points
    # where the x axis (recall) changes value
    i = np.where(mrec[1:] != mrec[:-1])[0]

    ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap
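A quick sanity check on a toy precision-recall curve (the values are made up for illustration):

recall = [0.0, 0.2, 0.4, 0.4, 0.8, 1.0]
precision = [1.0, 1.0, 0.67, 0.5, 0.57, 0.5]
print(compute_ap(recall, precision))  # area under the interpolated PR curve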
Example #5
            neural_net[l].b = neural_net[l].b - np.mean(
                deltas[0], axis=0, keepdims=True) * lr
            neural_net[l].w = neural_net[l].w - out[l][1].T @ deltas[0] * lr

    return out[-1][1]


def create_nn(topology, act_f):
    nn = []  # list of layers
    for l, layer in enumerate(topology[:-1]):
        nn.append(neural_layer(topology[l], topology[l + 1], act_f))
    return nn


# Activation functions (sigm pairs the function with its derivative)
sigm = (lambda x: 1 / (1 + np.e**(-x)), lambda x: x * (1 - x))
relu = lambda x: np.maximum(0, x)

# Dataset
n = 500  # number of samples
p = 2  # number of features per sample
from sklearn.datasets import make_blobs
X, Y = make_blobs(n_samples=n,
                  cluster_std=0.5,
                  n_features=2,
                  centers=[(-1, 1), (1, 1)])
Y = Y[:, np.newaxis]
# plot the two clusters
plt.scatter(X[Y[:, 0] == 1, 0], X[Y[:, 0] == 1, 1], color='salmon')
plt.scatter(X[Y[:, 0] == 0, 0], X[Y[:, 0] == 0, 1], color='skyblue')
plt.axis('equal')
plt.show()
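With the pieces above, assembling the network is a single call; the topology below is illustrative, and neural_layer is assumed to be defined in the part of the script cut off at the top:

topology = [p, 4, 8, 1]  # hypothetical layer sizes
neural_net = create_nn(topology, sigm)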
Example #6
import numpy as np

# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension
N, D_in, H, D_out = 64, 1000, 100, 10

# Create random input and output data
x = np.random.randn(N, D_in)
y = np.random.randn(N, D_out)

# Randomly initialize weights
w1 = np.random.randn(D_in, H)
w2 = np.random.randn(H, D_out)

lr = 1e-6
for t in range(500):
    # forward pass
    h = x.dot(w1)
    h_relu = np.maximum(h, 0)
    y_pred = h_relu.dot(w2)

    loss = np.square(y_pred - y).sum()
    print(t, loss)

    # backprop: compute gradients of the loss with respect to w2 and w1
    grad_y_pred = 2 * (y_pred - y)
    grad_w2 = h_relu.T.dot(grad_y_pred)
    grad_h_relu = grad_y_pred.dot(w2.T)
    grad_h = grad_h_relu.copy()
    grad_h[h < 0] = 0
    grad_w1 = x.T.dot(grad_h)

    # update weights with plain gradient descent
    w1 -= lr * grad_w1
    w2 -= lr * grad_w2
Example #7
def relu(z):
    return np.maximum(0, z)
Example #8
def relu(x):
    return np.maximum(0, x)
Example #9
def LeakyReLU(x, alpha):  ### Leaky Rectified Linear Unit activation
    ## if 'alpha' is zero, this reduces to a standard ReLU
    return np.maximum(x, alpha * x)
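For instance, with illustrative inputs:

import numpy as np

x = np.array([-2.0, -0.5, 0.0, 1.5])
print(LeakyReLU(x, 0.1))  # [-0.2  -0.05  0.    1.5 ]
print(LeakyReLU(x, 0.0))  # [0.  0.  0.  1.5] -- plain ReLU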
Example #10
import numpy as np
import pandas as pd


def run_cppi(risky_r,
             safe_r=None,
             start=1000,
             floor=0.8,
             drawdown=None,
             riskfree_rate=0.03,
             m=3):
    """
    Runs a backtest of the CPPI strategy, given a set of returns for the risky asset
    Returns a dictionary containing: Asset Value History, Risk budged History, Risky Weight History
    """
    #set up CPPI Parameters
    dates = risky_r.index
    n_steps = len(dates)
    account_value = start
    floor_value = start * floor
    peak = start

    if isinstance(risky_r, pd.Series):
        risky_r = pd.DataFrame(risky_r, columns=["R"])

    if safe_r is None:
        safe_r = pd.DataFrame().reindex_like(risky_r)
        safe_r.values[:] = riskfree_rate / 12  # fast way to fill with numbers

    account_history = pd.DataFrame().reindex_like(risky_r)
    risky_w_history = pd.DataFrame().reindex_like(risky_r)
    cushion_history = pd.DataFrame().reindex_like(risky_r)

    for step in range(n_steps):
        if drawdown is not None:
            peak = np.maximum(peak, account_value)
            floor_value = peak * (1 - drawdown)
        cushion = (account_value - floor_value) / account_value
        risky_w = m * cushion
        risky_w = np.minimum(risky_w, 1)  # won't leverage
        risky_w = np.maximum(risky_w, 0)  # won't go short
        safe_w = 1 - risky_w

        risky_alloc = account_value * risky_w
        safe_alloc = account_value * safe_w

        ## update the account value for this time step

        account_value = (risky_alloc *
                         (1 + risky_r.iloc[step])) + (safe_alloc *
                                                      (1 + safe_r.iloc[step]))
        ## save the current values

        cushion_history.iloc[step] = cushion
        risky_w_history.iloc[step] = risky_w
        account_history.iloc[step] = account_value

    risky_wealth = start * (1 + risky_r).cumprod()

    backtest_result = {
        "Wealth": account_history,
        "Risky Wealth": risky_wealth,
        "Risk Budget": cushion_history,
        "Risky Allocation": risky_w_history,
        "m": m,
        "start": start,
        "risky_r": risky_r,
        "safe_r": safe_r
    }
    return backtest_result
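A usage sketch with synthetic monthly returns standing in for real data (the inputs below are fabricated purely for illustration):

rng = np.random.default_rng(42)
risky_r = pd.DataFrame(rng.normal(0.01, 0.04, size=120), columns=["R"])
result = run_cppi(risky_r, floor=0.8, m=3)
print(result["Wealth"].iloc[-1])  # final CPPI account value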
Example #11
def forward(self, inputs):
    # ReLU forward pass: clamp negative inputs to zero
    self.output = np.maximum(0, inputs)
Example #12
import numpy as np
import matplotlib.pyplot as plt

D = np.random.randn(1000, 500)
hidden_layer_sizes = [500] * 10
# *************** activation function "tanh"
nonlinearities = ['tanh'] * len(hidden_layer_sizes)
# *************** activation function "relu"
# nonlinearities = ['relu'] * len(hidden_layer_sizes)

act = {'relu': lambda x: np.maximum(0, x), 'tanh': lambda x: np.tanh(x)}
Hs = {}
for i in range(len(hidden_layer_sizes)):
    X = D if i == 0 else Hs[i - 1]  # input to this layer
    fan_in = X.shape[1]
    fan_out = hidden_layer_sizes[i]

    # ------- small random numbers
    # W = np.random.randn(fan_in, fan_out) * 0.01
    # ------- zeros
    # W = 0
    # ------- large random numbers
    W = np.random.randn(fan_in, fan_out) * 1
    # ------- Xavier initialization, for tanh (or for ReLU)
    # W = np.random.randn(fan_in, fan_out) / np.sqrt(fan_in)
    # ------- He initialization: "Delving Deep into Rectifiers: Surpassing
    # Human-Level Performance on ImageNet Classification", He et al., 2015
    # W = np.random.randn(fan_in, fan_out) / np.sqrt(fan_in / 2)

    H = np.dot(X, W)
    H = act[nonlinearities[i]](H)
    Hs[i] = H  # cache result on this layer
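The point of this experiment is to watch the activation statistics layer by layer; a short follow-up in the spirit of the original demo (the exact reporting code is an assumption) could print them:

# with tanh and large initial weights, activations saturate toward -1/+1
for i, H in Hs.items():
    print('layer %d: mean %+.5f, std %.5f' % (i + 1, H.mean(), H.std()))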
Example #13
np.modf(x): returns the fractional and integer parts of each element as two separate arrays

np.cos(x)  np.cosh(x)  np.sin(x)  np.sinh(x)  np.tan(x)  np.tanh(x): compute the ordinary and hyperbolic trigonometric functions of each element

np.exp(x): computes the exponential of each element

np.sign(x): computes the sign of each element: 1 (+), 0, -1 (-)

"""

a = np.array([2, 3, -4, 5])
print(np.sign(a))  # [ 1  1 -1  1]
print(np.modf(a))  # (array([ 0.,  0., -0.,  0.]), array([ 2.,  3., -4.,  5.]))
print(np.square(a))  # [ 4  9 16 25]
"""
二元函数
+ - * /  **   :    两个数组各元素进行对应运算

np.maximun(x,y) 或np.fmax() :  元素级的最大值

np.minimun(x,y) 或np.fmin() :   元素级的最小值

np.mod(x, y) :  元素级的模运算

np.copysign(x, y) :  将数组y中各元素值的符号赋值给数组x对应的元素

>  <  >=  <=  ==  !=   :   算术比较,产生布尔型数组


"""