Example 1
    def __init__(
        self,
        p: int,
        q: int = 2,
        sparsity: int = -1,
        random_state: Optional[Union[int, np.random.RandomState]] = None,
    ):
        """Generates a normalized random projection vector (for initialization purposes).

        Args:
            p: The dimension of the vector.
            q: The order of ell^q unit ball from which to sample.
            sparsity: The number of non-zero coordinates; pass -1 for a dense vector.
            random_state: NumPy random state.
        """
        super().__init__(q=q)
        _rs = check_random_state(random_state)
        if sparsity > 0:
            q_generalized_normal = np.zeros((p, 1))
            idxs = _rs.choice(a=p, size=sparsity, replace=False)
            q_generalized_normal[idxs, 0] = gennorm.rvs(beta=q, size=sparsity)
        else:
            q_generalized_normal = gennorm.rvs(beta=q, size=(p, 1))
        self.beta = q_generalized_normal
        self._normalize()
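
    # The _normalize method is not shown in this snippet. A minimal sketch of
    # what it presumably does, assuming the parent class stores q (a
    # hypothetical reconstruction, not the original code): divide by the ell^q
    # norm so the vector lies on the ell^q unit sphere; with i.i.d.
    # gennorm(beta=q) coordinates the resulting direction is uniform on that
    # sphere (q=2 is the classic Gaussian trick for the Euclidean sphere).
    def _normalize(self):
        q_norm = np.sum(np.abs(self.beta) ** self.q) ** (1.0 / self.q)
        self.beta = self.beta / q_norm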
Example 2
def generate_dist_centroids(number_of_wires, loc, scale, beta=5):
    '''
    Generates 2D coordinates from a generalized normal distribution
    '''

    # This is a uniform random distribution
    #xc = np.random.rand(number_of_wires) * Lx # uniform random in [0,Lx) for each coordinate dimension
    #yc = np.random.rand(number_of_wires) * Ly # uniform random in [0,Ly) for each coordinate dimension

    xc = gennorm.rvs(beta, loc=loc, scale=scale, size=int(number_of_wires))
    yc = gennorm.rvs(beta, loc=loc, scale=scale, size=int(number_of_wires))

    return xc, yc
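
# For reference: beta=2 recovers the Gaussian, beta=1 the Laplace, and large
# beta approaches a flat-topped, uniform-like shape, so beta=5 above clusters
# the centroids in a plateau around loc. A quick usage sketch (parameter
# values are illustrative, not from the original code):
import matplotlib.pyplot as plt

xc, yc = generate_dist_centroids(500, loc=50.0, scale=10.0, beta=5)
plt.scatter(xc, yc, s=4)
plt.show()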
Example 3
def main():
    beta = 1.3
    mean, var, skew, kurt = gennorm.stats(beta, moments='mvsk')
    print('Real params:\n\t- Mean: %f\n\t- Var: %f\n\t- Beta: %f' %
          (mean, var, beta))

    # Generate random samples
    r = gennorm.rvs(beta, size=1000)

    # Get params
    beta_est, loc_est, scale_est = gennorm.fit(r)
    mean_est, var_est = gennorm.stats(beta_est, loc=loc_est, scale=scale_est,
                                      moments='mv')
    print('Fitted params:\n\t- Mean: %f\n\t- Var: %f\n\t- Beta: %f' %
          (mean_est, var_est, beta_est))

    # Generate pdf
    x = np.linspace(gennorm.ppf(0.01, beta_est), gennorm.ppf(0.99, beta_est),
                    100)
    y = gennorm.pdf(x, beta_est)

    fig, ax = plt.subplots(1, 1)
    ax.hist(r,
            bins=100,
            color='b',
            density=True,
            histtype='stepfilled',
            alpha=0.3)
    ax.plot(x, y, 'r-', lw=5, alpha=0.6, label='gennorm pdf')
    ax.legend(loc='best', frameon=False)
    plt.show()
Example 4
    def make_emiss_flow(self, max_steps):
        step = 0
        while step <= max_steps:
            # Draw an inter-emission interval from gennorm(beta=2.786, loc=0.371,
            # scale=0.143), scaled up to integer simulation steps
            time_emiss = gennorm.rvs(2.786, 0.371, 0.143, size=1, random_state=None) * 10000
            time_emiss = int(round(time_emiss[0]))
            step += time_emiss
            self.transaction_emiss_flow.append(step)
        return
Example 5
    def __call__(self, shape):
        if self.apply_scale:
            sqrt_n = np.sqrt(shape[1])
        else:
            sqrt_n = 1

        D = gennorm.rvs(beta=self.beta, size=shape)
        # Variance of a unit-scale gennorm(beta) variate: Gamma(3/beta) / Gamma(1/beta)
        var = gamma(3./self.beta)/gamma(1./self.beta)
        # Make D have unit variance
        D = D/np.sqrt(var)
        
        # Return correctly scaled version of D
        return D/sqrt_n*self.std
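
# The closed form Var[X] = Gamma(3/beta) / Gamma(1/beta) used above (for a
# unit-scale generalized normal) can be sanity-checked against scipy; a small
# standalone check with an arbitrary beta:
from scipy.special import gamma
from scipy.stats import gennorm

beta = 1.5
analytic = gamma(3.0 / beta) / gamma(1.0 / beta)
# gennorm.var defaults to scale=1, so the two values should agree
assert abs(analytic - gennorm.var(beta)) < 1e-9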
Example 6
def main():
    # Build model
    print('Loading model ...\n')
    net = DnCNN(channels=1, num_of_layers=opt.num_of_layers)
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    model.load_state_dict(
        torch.load(os.path.join(opt.logdir, 'model_Best.pth')))
    model.eval()
    # load data info
    print('Loading data info ...\n')
    files_source = glob.glob(os.path.join('data', opt.test_data, '*.png'))
    files_source.sort()
    # process data
    psnr_test = 0
    for f in files_source:
        # image
        Img = cv2.imread(f)
        Img = normalize(np.float32(Img[:, :, 0]))
        Img = np.expand_dims(Img, 0)
        Img = np.expand_dims(Img, 1)
        ISource = torch.Tensor(Img)
        # noise
        #noise = torch.FloatTensor(ISource.size()).uniform_(-1.732*opt.test_noiseL/255.,1.732*opt.test_noiseL/255.)
        flatSize = getSize(ISource)
        alpha = getAlpha(opt.test_noiseL / 255., opt.beta)
        noise = torch.FloatTensor(
            gennorm.rvs(opt.beta,
                        scale=alpha,
                        size=flatSize,
                        random_state=None))
        noise = noise.view(ISource.size())
        # noisy image
        INoisy = ISource + noise
        ISource, INoisy = Variable(ISource.cuda()), Variable(INoisy.cuda())
        with torch.no_grad():  # this can save much memory
            Out = torch.clamp(INoisy - model(INoisy), 0., 1.)
        ## if you are using older version of PyTorch, torch.no_grad() may not be supported
        # ISource, INoisy = Variable(ISource.cuda(),volatile=True), Variable(INoisy.cuda(),volatile=True)
        # Out = torch.clamp(INoisy-model(INoisy), 0., 1.)
        psnr = batch_PSNR(Out, ISource, 1.)
        psnr_test += psnr
        print("%s PSNR %f" % (f, psnr))
    psnr_test /= len(files_source)
    print("\nPSNR on test data %f" % psnr_test)
Example 7
def estimate_drawdown_for_probability(daily_returns, prob, trading_days=250, n=1000):
    """
    Calculates the drawdown corresponding to the probability passed in by fitting a general normal distribution
    and simulating returns for a year to generate a CDF (a fn of: independent var - drawdown , dependent var -
    P(drawdown >= prob)
    Note if you want to find the moments of the fitted distribution do:
    (mean, var, skew, kurtosis) = scipy.stats.gennorm.stats(b_sample, moments='mvsk')
    May in the future want to check if the distribution is a good fit or not
    :param daily_returns: time series of empirical daily returns
    :param prob: probability (float) for which to find the corresponding drawdown
    :param trading_days: the number of trading days expected in a year
    :param n: number of simulations to run
    :return: a drawdown number in percentage terms; will be negative (e.g. -0.21) and the parameter corresponding to
    the fitted generalized normal distribution
    """
    drawdown_indexes = [-float(i)/100. for i in range(0, 100)]
    if prob < 0 or prob > 1:
        raise ValueError('Parameter prob must be between 0 and 1')
    b, loc, scale = gennorm.fit(daily_returns)
    (mean, var, skew, kurtosis) = gennorm.stats(b, scale=scale, moments='mvsk')

    simulated_drawdowns = []
    datetime_index = [dt.datetime(2010, 1, 1) + dt.timedelta(days=t) for t in range(0, trading_days)]
    for i in range(0, n):
        simulated_daily_returns = gennorm.rvs(b, loc=loc, scale=scale, size=trading_days)
        s = pd.Series(simulated_daily_returns, index=datetime_index)
        return_series = s.cumsum()
        (max_drawdown, dd_length) = max_dd(return_series)
        simulated_drawdowns.append(max_drawdown)
    simulated_drawdowns_series = pd.Series(simulated_drawdowns)
    empirical_probabilities = []
    for d in drawdown_indexes:
        incidence_count = (simulated_drawdowns_series <= d).sum()
        empirical_probabilities.append(float(incidence_count) / float(n))

    #will have least negative drawdown first in drawdown_indexes and highest probability empirical_probabilities first
    #we are conservative: it will select the higher probability drawdown upon non-match (less negative / closer in)
    cdf_series = pd.Series(empirical_probabilities, index=drawdown_indexes)
    matching_drawdown = cdf_series[cdf_series.values >= prob].index[-1]
    return (matching_drawdown, cdf_series, b, loc, scale)
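
# A usage sketch with synthetic returns (illustrative values; assumes the
# max_dd helper used above is available in the same module):
import numpy as np

rng = np.random.default_rng(0)
# Synthetic heavy-tailed daily returns standing in for real data
daily_returns = rng.laplace(loc=0.0003, scale=0.008, size=1000)

dd, cdf, b, loc, scale = estimate_drawdown_for_probability(daily_returns, prob=0.05)
print('At least 5%% of simulated years saw a max drawdown of %.2f or worse' % dd)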
Example 8
def main():
    # Load dataset
    print('Loading dataset ...\n')
    dataset_train = Dataset(train=True)
    dataset_val = Dataset(train=False)
    loader_train = DataLoader(dataset=dataset_train, num_workers=4, batch_size=opt.batchSize, shuffle=True)
    print("# of training samples: %d\n" % int(len(dataset_train)))
    # Build model
    net = DnCNN(channels=1, num_of_layers=opt.num_of_layers)
    net.apply(weights_init_kaiming)
    criterion = nn.MSELoss(size_average=False)
    # Move to GPU
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    criterion.cuda()
    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    # training
    writer = SummaryWriter(opt.outf)
    step = 0
    noiseL_B=[0,55] # ignored when opt.mode=='S'
    PSNRs = []
    for epoch in range(opt.epochs):
        if epoch < opt.milestone:
            current_lr = opt.lr
        else:
            current_lr = opt.lr / 10.
        # set learning rate
        for param_group in optimizer.param_groups:
            param_group["lr"] = current_lr
        print('learning rate %f' % current_lr)
        # train
        for i, data in enumerate(loader_train, 0):
            # training step
            model.train()
            model.zero_grad()
            optimizer.zero_grad()
            img_train = data
            if opt.mode == 'S':
                noise = torch.FloatTensor(img_train.size()).normal_(mean=0, std=opt.noiseL/255.)
            if opt.mode == 'B':
                noise = torch.zeros(img_train.size())
                stdN = np.random.uniform(noiseL_B[0], noiseL_B[1], size=noise.size()[0])/255.
                for n in range(noise.size()[0]):
                    #sizeN = noise[0,:,:,:].size()
                    flatSize = getSize(noise[0,:,:,:])
                    alpha = getAlpha(stdN[n],opt.beta)
                    noise_temp = torch.FloatTensor(gennorm.rvs(opt.beta, scale=alpha, size=flatSize, random_state=None))
                    noise_temp_tensor = noise_temp.view(noise[0,:,:,:].size())
                    noise[n,:,:,:] = noise_temp_tensor
            imgn_train = img_train + noise
            img_train, imgn_train = Variable(img_train.cuda()), Variable(imgn_train.cuda())
            noise = Variable(noise.cuda())
            out_train = model(imgn_train)
            loss = criterion(out_train, noise) / (imgn_train.size()[0]*2)
            loss.backward()
            optimizer.step()
            # results
            model.eval()
            out_train = torch.clamp(imgn_train-model(imgn_train), 0., 1.)
            psnr_train = batch_PSNR(out_train, img_train, 1.)
            print("[epoch %d][%d/%d] loss: %.4f PSNR_train: %.4f" %
                (epoch+1, i+1, len(loader_train), loss.item(), psnr_train))
            # if you are using older version of PyTorch, you may need to change loss.item() to loss.data[0]
            if step % 10 == 0:
                # Log the scalar values
                writer.add_scalar('loss', loss.item(), step)
                writer.add_scalar('PSNR on training data', psnr_train, step)
            step += 1
        ## the end of each epoch
        model.eval()
        # validate
        psnr_val = 0
        for k in range(len(dataset_val)):
            img_val = torch.unsqueeze(dataset_val[k], 0)
            noise = torch.FloatTensor(img_val.size()).normal_(mean=0, std=opt.val_noiseL/255.)
            imgn_val = img_val + noise
            img_val, imgn_val = Variable(img_val.cuda(), volatile=True), Variable(imgn_val.cuda(), volatile=True)
            out_val = torch.clamp(imgn_val-model(imgn_val), 0., 1.)
            psnr_val += batch_PSNR(out_val, img_val, 1.)
        psnr_val /= len(dataset_val)
        print("\n[epoch %d] PSNR_val: %.4f" % (epoch+1, psnr_val))
        writer.add_scalar('PSNR on validation data', psnr_val, epoch)
        # log the images
        out_train = torch.clamp(imgn_train-model(imgn_train), 0., 1.)
        Img = utils.make_grid(img_train.data, nrow=8, normalize=True, scale_each=True)
        Imgn = utils.make_grid(imgn_train.data, nrow=8, normalize=True, scale_each=True)
        Irecon = utils.make_grid(out_train.data, nrow=8, normalize=True, scale_each=True)
        writer.add_image('clean image', Img, epoch)
        writer.add_image('noisy image', Imgn, epoch)
        writer.add_image('reconstructed image', Irecon, epoch)
        # save model
        #torch.save(model.state_dict(), os.path.join(opt.outf, 'net.pth'))
        model_file = 'model_' + str(epoch) + '.pth'
        torch.save(model.state_dict(), os.path.join(opt.outf, model_file))
        PSNRs.append(psnr_val)
        modelBest_file = 'model_Best.pth'
        # Save whenever this epoch beats every previous epoch's validation PSNR
        if epoch == 0 or psnr_val > max(PSNRs[:-1]):
            torch.save(model.state_dict(), os.path.join(opt.outf, modelBest_file))
Example 9
    def sample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        with torch.no_grad():
            # Draws from the standard (loc=0, scale=1) gennorm; self.loc is
            # used only to place the result on the correct device
            return torch.tensor(gennorm.rvs(self.beta, size=shape), dtype=torch.float32, device=self.loc.device)
Example 10
def WeightedRandom(sel, M, N, total, dist):
    if dist == "no":
        if sel == "exp":
            rand = []
            expvar = M
            ctr = 100
            final = (-float(M)) / math.log(0.05)
            while ctr < total and expvar > final:
                expvar = expvar - 500
                ctr += 100
            if expvar < final:
                expvar = final
            if total < 0:
                for i in range(0, N):
                    rand.append(random.randint(0, M - 1))
                    while rand.count(rand[i]) > 1:
                        rand[i] = random.randint(0, M - 1)
            else:
                for i in range(0, N):
                    rand.append(int(expon.rvs(scale=expvar)))
                    if rand[i] < 0:
                        rand[i] = -rand[i]
                    while rand[i] >= M or rand.count(rand[i]) > 1:
                        rand[i] = int(expon.rvs(scale=expvar))
                        if rand[i] < 0:
                            rand[i] = -rand[i]
        elif sel == "filter":
            rand = []
            epscale = M
            ctr = 100
            while ctr < total and epscale != 1000:
                epscale = epscale - 500
                ctr += 100
            if total < 0:
                for i in range(0, N):
                    rand.append(random.randint(0, M - 1))
                    while rand.count(rand[i]) > 1:
                        rand[i] = random.randint(0, M - 1)
            else:
                for i in range(0, N):
                    rand.append(int(gennorm.rvs(5, scale=epscale)))
                    if rand[i] < 0:
                        rand[i] = -rand[i]
                    while rand[i] >= M or rand.count(rand[i]) > 1:
                        rand[i] = int(gennorm.rvs(5, scale=epscale))
                        if rand[i] < 0:
                            rand[i] = -rand[i]
        elif sel == "triang":
            rand = []
            triscale = M * 2
            ctr = 100
            final = M
            while ctr < total and triscale > M:
                triscale = triscale - 1000
                ctr += 100
            if triscale <= M:
                triscale = M + int(round(M / 5))
            if total < 0:
                for i in range(0, N):
                    rand.append(random.randint(0, M - 1))
                    while rand.count(rand[i]) > 1:
                        rand[i] = random.randint(0, M - 1)
            else:
                for i in range(0, N):
                    rand.append(int(triang.rvs(0, loc=0, scale=triscale)))
                    if rand[i] < 0:
                        rand[i] = -rand[i]
                    while rand[i] >= M or rand.count(rand[i]) > 1:
                        rand[i] = int(triang.rvs(0, loc=0, scale=triscale))
                        if rand[i] < 0:
                            rand[i] = -rand[i]
    else:
        if sel == "exp":
            rand = []
            expvar = (-float(M)) / math.log(0.05)
            if total < 0:
                for i in range(0, N):
                    rand.append(random.randint(0, M - 1))
                    while rand.count(rand[i]) > 1:
                        rand[i] = random.randint(0, M - 1)
            else:
                for i in range(0, N):
                    rand.append(int(expon.rvs(scale=expvar)))
                    if rand[i] < 0:
                        rand[i] = -rand[i]
                    while rand[i] >= M or rand.count(rand[i]) > 1:
                        rand[i] = int(expon.rvs(scale=expvar))
                        if rand[i] < 0:
                            rand[i] = -rand[i]
        elif sel == "filter":
            rand = []
            epscale = M / 5
            if total < 0:
                for i in range(0, N):
                    rand.append(random.randint(0, M - 1))
                    while rand.count(rand[i]) > 1:
                        rand[i] = random.randint(0, M - 1)
            else:
                for i in range(0, N):
                    rand.append(int(gennorm.rvs(5, scale=epscale)))
                    if rand[i] < 0:
                        rand[i] = -rand[i]
                    while rand[i] >= M or rand.count(rand[i]) > 1:
                        rand[i] = int(gennorm.rvs(5, scale=epscale))
                        if rand[i] < 0:
                            rand[i] = -rand[i]
        elif sel == "triang":
            rand = []
            triscale = M + int(round(M / 10))
            if total < 0:
                for i in range(0, N):
                    rand.append(random.randint(0, M - 1))
                    while rand.count(rand[i]) > 1:
                        rand[i] = random.randint(0, M - 1)
            else:
                for i in range(0, N):
                    rand.append(int(triang.rvs(0, loc=0, scale=triscale)))
                    if rand[i] < 0:
                        rand[i] = -rand[i]
                    while rand[i] >= M or rand.count(rand[i]) > 1:
                        rand[i] = int(triang.rvs(0, loc=0, scale=triscale))
                        if rand[i] < 0:
                            rand[i] = -rand[i]

    return rand
Example 11
def randgn_like_cpu(tensor, p=2, device='cuda'):
    raw_variance = gamma(3. / p) / gamma(1. / p)
    raw_std = math.sqrt(raw_variance)
    flat = gennorm.rvs(p, size=torch.numel(tensor)) / raw_std
    return torch.tensor(flat, dtype=torch.float, device=device).reshape(tensor.shape)
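
# A quick check that the helper matches the requested shape and has
# (approximately) unit variance; since it places the result on the requested
# device, it can run on CPU:
import torch

x = torch.empty(64, 64)
g = randgn_like_cpu(x, p=4, device='cpu')
print(g.shape)         # torch.Size([64, 64])
print(g.var().item())  # close to 1.0 by construction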
Example 12
x = np.linspace(gennorm.ppf(0.01, beta),
                gennorm.ppf(0.99, beta), 100)
ax.plot(x, gennorm.pdf(x, beta),
       'r-', lw=5, alpha=0.6, label='gennorm pdf')

# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.

# Freeze the distribution and display the frozen ``pdf``:

rv = gennorm(beta)
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

# Check accuracy of ``cdf`` and ``ppf``:

vals = gennorm.ppf([0.001, 0.5, 0.999], beta)
np.allclose([0.001, 0.5, 0.999], gennorm.cdf(vals, beta))
# True

# Generate random numbers:

r = gennorm.rvs(beta, size=1000)

# And compare the histogram:

ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()