Code example #1
File: uncertainty.py Project: ziyanzzhu/am207
def auc_calc_beta(x_test, y_test, nn, N, perc, model, weightlist=None): 
    ''' 
    Options for model "mc", "bbvi", "ensemble", "deterministic"
    For BBVI, pass a list of weights. 
    '''
    n_test = len(y_test)
    if model != "deterministic": 
        if model == "mc":
            p_allw, p_mean, entropymean = myentropy(nn, [nn.weights]*N, x_test.T, returnallp=True)
        elif (model == "bbvi") and weightlist is not None: 
            p_allw, p_mean, entropymean = myentropy(nn, weightlist, x_test.T, returnallp=True)
        elif model == "ensemble": # deterministic 
            p_allw, p_mean, entropymean = myentropy(nn, weightlist, x_test.T, returnallp=True)
        #p_allw has dimension: NWeightSamples x NXData
        idx = np.argsort(entropymean)
        y_test = y_test[idx]
        p_mean = p_mean[idx]
        p_allw = p_allw[:, idx]
        y_pred_retained_allw = p_allw[:, 0:int(perc*n_test)]
        y_pred_retained = p_mean[0:int(perc*n_test)] # choosing samples with smallest entropy to evaluate 
        y_test_retained = y_test[0:int(perc*n_test)]
        ypredmean = np.round(y_pred_retained)
        ypred_allw = np.round(y_pred_retained_allw) #NW x NX
        auc_allw = np.zeros(ypred_allw.shape[0])
        for w in range(ypred_allw.shape[0]):
            auc_allw[w] = np.count_nonzero(ypred_allw[w, :]==y_test_retained)/len(y_test_retained) * 100
        return auc_allw
    else: 
        auc = auc_calc_proba(x_test, y_test, nn, N, perc)
        return auc #this only returns the mean accuracy
Code example #2
File: util.py Project: afcarl/autopaint
def load_and_pickle_binary_mnist():
    N_data, train_images, train_labels, test_images, test_labels = load_mnist()
    train_images = np.round(train_images)
    test_images = np.round(test_images)
    mnist_data = N_data, train_images, train_labels, test_images, test_labels
    with open('mnist_binary_data.pkl', 'wb') as f:
        pickle.dump(mnist_data, f, 1)
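
A minimal usage sketch for reading the pickled data back (assuming the file written above exists; the binary read mode matches the 'wb' write mode):

import pickle

with open('mnist_binary_data.pkl', 'rb') as f:
    N_data, train_images, train_labels, test_images, test_labels = pickle.load(f)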
Code example #3
def test_gp_missing_obs(bspline_data):
    low, high = bspline_data['xlim']
    num_bases = 5
    bsplines_degree = 3
    basis = BSplines(low, high, num_bases, bsplines_degree, boundaries='space')

    n_clusters = 1

    n_train = bspline_data['n_train']
    truncated_time = bspline_data['truncated_time']

    m = []
    for i in range(n_clusters):
        m.append(LinearWithBsplinesBasis(basis, no=i))
    tr = []
    tr.append((1.0, Treatment(2.0)))
    gp = GP(m, linear_cov(basis), tr, ac_fn=None)

    # modify dataset
    gp.fit(make_missed_obs_samples(bspline_data['training2']),
           options={'maxiter': 1})
    print(gp.params)

    assert np.round(gp.params['linear_with_bsplines_basis_mean_coef0'],
                    3).tolist() != [0.0] * num_bases
    assert gp.params['treatment'].tolist() != [0.0]
    assert np.round(gp.params['classes_prob_logit_F'], 0).tolist() == [1.0]
    _test_gp_prediction(
        gp, make_missed_obs_samples(bspline_data['testing1'][0:20]),
        truncated_time)
Code example #4
File: ADAM.py Project: varunranga/zorb-numpy
    def evaluate(self, X, Y, evaluation_type='regression'):

        predicted_y = self.predict(X)

        result = {}

        result['error'] = numpy.mean(numpy.linalg.norm(predicted_y - Y,
                                                       axis=1))

        if 'classification' in evaluation_type:

            correct = 0
            incorrect = 0

            if self.last_outgoing_dimension == 1:

                for py, y in zip(predicted_y, Y):
                    if (numpy.round(py) == numpy.round(y)):
                        correct += 1
                    else:
                        incorrect += 1

            elif self.last_outgoing_dimension > 1:

                for py, y in zip(predicted_y, Y):

                    if (numpy.argmax(py) == numpy.argmax(y)):
                        correct += 1
                    else:
                        incorrect += 1

            result['accuracy'] = correct / (correct + incorrect)

        return result
Code example #5
def HMMModelTest():

    with np.errstate(under='ignore',
                     divide='raise',
                     over='raise',
                     invalid='raise'):

        T = 10
        D_latent = 5
        D_obs = 4
        meas = 2
        size = 3

        alpha_0 = np.random.random(D_latent) + 1
        alpha = np.random.random((D_latent, D_latent)) + 1
        L = np.random.random((D_latent, D_obs)) + 1

        params = {'alpha_0': alpha_0, 'alpha': alpha, 'L': L}

        hmm = HMMModel(**params)

        _, ys = HMMModel.generate(T=T,
                                  latentSize=D_latent,
                                  obsSize=D_obs,
                                  measurements=meas,
                                  size=size)

        hmm.fit(ys=ys,
                method='gibbs',
                nIters=500,
                burnIn=200,
                skip=2,
                verbose=True)
        marginal = hmm.state.ilog_marginal(ys)
        print('\nParams')
        for p in hmm.state.params:
            print(np.round(p, decimals=3))
            print()
        print('MARGINAL', marginal)

        hmm.fit(ys=ys,
                method='EM',
                nIters=1000,
                monitorMarginal=10,
                verbose=False)
        marginal = hmm.state.ilog_marginal(ys)
        print('\nParams')
        for p in hmm.state.params:
            print(np.round(p, decimals=3))
            print()
        print('MARGINAL', marginal)

        hmm.fit(ys=ys, method='cavi', maxIters=1000, verbose=False)
        elbo = hmm.state.iELBO(ys)
        print('\nPrior mean field params')
        for p in hmm.state.prior.mf_params:
            print(np.round(p, decimals=3))
            print()
        print('ELBO', elbo)
Code example #6
def gradient_descent(g, w, x_train, x_val, alpha, max_its, batch_size,
                     **kwargs):
    verbose = True
    if 'verbose' in kwargs:
        verbose = kwargs['verbose']

    # flatten the input function, create gradient based on flat function
    g_flat, unflatten, w = flatten_func(g, w)
    grad = value_and_grad(g_flat)

    # record history
    num_train = x_train.shape[1]
    num_val = x_val.shape[1]
    w_hist = [unflatten(w)]
    train_hist = [g_flat(w, x_train, np.arange(num_train))]
    val_hist = [g_flat(w, x_val, np.arange(num_val))]

    # how many mini-batches equal the entire dataset?
    num_batches = int(np.ceil(np.divide(num_train, batch_size)))

    # main iteration loop
    for k in range(max_its):
        # loop over each minibatch
        start = timer()
        for b in range(num_batches):
            # collect indices of current mini-batch
            batch_inds = np.arange(b * batch_size,
                                   min((b + 1) * batch_size, num_train))

            # plug in value into func and derivative
            cost_eval, grad_eval = grad(w, x_train, batch_inds)
            grad_eval.shape = np.shape(w)

            # take descent step
            w = w - alpha * grad_eval

        end = timer()

        # update training and validation cost
        train_cost = g_flat(w, x_train, np.arange(num_train))
        val_cost = g_flat(w, x_val, np.arange(num_val))

        # record weight update, train and val costs
        w_hist.append(unflatten(w))
        train_hist.append(train_cost)
        val_hist.append(val_cost)

        if verbose:
            print('step ' + str(k + 1) + ' done in ' +
                  str(np.round(end - start, 1)) + ' secs, train cost = ' +
                  str(np.round(train_hist[-1][0], 4)) + ', val cost = ' +
                  str(np.round(val_hist[-1][0], 4)))

    if verbose:
        print('finished all ' + str(max_its) + ' steps')
        #time.sleep(1.5)
        #clear_output()
    return w_hist, train_hist, val_hist
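
A hedged usage sketch for this optimizer. The cost signature g(w, x, inds) and the (features, samples) data layout are assumptions inferred from how g_flat is called above; flatten_func and value_and_grad are assumed imported from autograd, as in the snippet's source file:

import autograd.numpy as np

# toy data: row 0 is the input, row 1 the target
x_all = np.vstack([np.linspace(0, 1, 120), 2 * np.linspace(0, 1, 120) + 0.1])
x_train, x_val = x_all[:, :100], x_all[:, 100:]

# least squares cost over a mini-batch of column indices
def g(w, x, inds):
    pred = w[0] + w[1] * x[0, inds]
    return np.mean((pred - x[1, inds]) ** 2)

w0 = np.array([0.0, 0.0])
w_hist, train_hist, val_hist = gradient_descent(
    g, w0, x_train, x_val, alpha=0.5, max_its=20, batch_size=25, verbose=False)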
Code example #7
 def callback(V_flat):
     loss_val = loss(V_flat)
     V = V_flat.reshape((N, d))
     reg_val = ortho_reg_fn(V, lambda_param)
     loss_no_reg = loss_val - reg_val
     pi = -loss_no_reg
     print("PI = " + str(np.round(pi, 4)) + " bits, reg = " +
           str(np.round(reg_val, 4)))
Code example #8
        def animate(k):
            # clear panels
            ax.cla()
            lam = lams[k]

            # print rendering update
            if np.mod(k + 1, 25) == 0:
                print('rendering animation frame ' + str(k + 1) + ' of ' +
                      str(num_frames))
            if k == num_frames - 1:
                print('animation rendering complete!')
                time.sleep(1.5)
                clear_output()

            # run optimization
            if algo == 'gradient_descent':
                weight_history, cost_history = self.gradient_descent(
                    g, w, self.x, self.y, lam, alpha_choice, max_its,
                    batch_size)
            if algo == 'RMSprop':
                weight_history, cost_history = self.RMSprop(
                    g, w, self.x, self.y, lam, alpha_choice, max_its,
                    batch_size)

            # choose set of weights to plot based on lowest cost val
            ind = np.argmin(cost_history)

            # classification? then base on accuracy
            if 'counter' in kwargs:
                # create counting cost history as well
                counts = [
                    counter(v, self.x, self.y, lam) for v in weight_history
                ]
                if k == 0:
                    ind = np.argmin(counts)
                count = counts[ind]
                acc = 1 - count / self.y.size
                acc = np.round(acc, 2)

            # save lowest misclass weights
            w_best = weight_history[ind][1:]

            # plot
            ax.axhline(c='k', zorder=2)

            # make bar plot
            ax.bar(np.arange(0, len(w_best)), w_best, color='k', alpha=0.5)

            # dress panel
            title1 = r'$\lambda = ' + str(np.round(lam, 2)) + '$'
            costval = cost_history[ind][0]
            title2 = ', cost val = ' + str(np.round(costval, 2))
            if 'counter' in kwargs:
                title2 = ', accuracy = ' + str(acc)
            title = title1 + title2
            ax.set_title(title)
            ax.set_xlabel('learned weights')
            return artist,
Code example #9
def gradient_descent(g, w, a_train, s_train, alpha, max_its, verbose):
    '''
    A basic gradient descent module (full batch) for system identification training.  
    Inputs to gradient_descent function:
    
    g - function to minimize
    w - initial weights
    a_train - training action sequence
    s_train - training state sequence
    alpha - steplength / learning rate
    max_its - number of iterations to perform
    verbose - print out update each step if verbose = True
    '''

    # flatten the input function, create gradient based on flat function
    g_flat, unflatten, w = flatten_func(g, w)
    grad = value_and_grad(g_flat)

    # record history
    # num_val = y_val.size
    w_hist = [unflatten(w)]
    train_hist = [g_flat(w, a_train, s_train)]

    # main iteration loop
    for k in range(1, max_its + 1):
        # take a single descent step
        start = timer()

        # plug in value into func and derivative
        cost_eval, grad_eval = grad(w, a_train, s_train)
        grad_eval.shape = np.shape(w)

        # take descent step
        w = w - alpha * grad_eval

        end = timer()

        # update training cost
        train_cost = g_flat(w, a_train, s_train)

        # record weight update, train cost
        w_hist.append(unflatten(w))
        train_hist.append(train_cost)

        if verbose:
            print('step ' + str(k) + ' done in ' +
                  str(np.round(end - start, 1)) + ' secs, train cost = ' +
                  str(np.round(train_hist[-1], 4)[0]))

    if verbose:
        print('finished all ' + str(max_its) + ' steps')
    return w_hist, train_hist
Code example #10
File: util.py Project: mdw771/beyond_dof
def save_rotation_lookup(array_size, n_theta, dest_folder=None):

    image_center = [np.floor(x / 2) for x in array_size]

    coord0 = np.arange(array_size[0])
    coord1 = np.arange(array_size[1])
    coord2 = np.arange(array_size[2])

    coord2_vec = np.tile(coord2, array_size[1])

    coord1_vec = np.tile(coord1, array_size[2])
    coord1_vec = np.reshape(coord1_vec, [array_size[1], array_size[2]])
    coord1_vec = np.reshape(np.transpose(coord1_vec), [-1])

    coord0_vec = np.tile(coord0, [array_size[1] * array_size[2]])
    coord0_vec = np.reshape(coord0_vec, [array_size[1] * array_size[2], array_size[0]])
    coord0_vec = np.reshape(np.transpose(coord0_vec), [-1])

    # move origin to image center
    coord1_vec = coord1_vec - image_center[1]
    coord2_vec = coord2_vec - image_center[2]

    # create matrix of coordinates
    coord_new = np.stack([coord1_vec, coord2_vec]).astype(np.float32)

    # create rotation matrix
    theta_ls = np.linspace(0, 2 * np.pi, n_theta)
    coord_old_ls = []
    for theta in theta_ls:
        m_rot = np.array([[np.cos(theta),  -np.sin(theta)],
                          [np.sin(theta), np.cos(theta)]])
        coord_old = np.matmul(m_rot, coord_new)
        coord1_old = np.round(coord_old[0, :] + image_center[1]).astype(int)
        coord2_old = np.round(coord_old[1, :] + image_center[2]).astype(int)
        # clip coordinates
        coord1_old = np.clip(coord1_old, 0, array_size[1]-1)
        coord2_old = np.clip(coord2_old, 0, array_size[2]-1)
        coord_old = np.stack([coord1_old, coord2_old], axis=1)
        coord_old_ls.append(coord_old)
    if dest_folder is None:
        dest_folder = 'arrsize_{}_{}_{}_ntheta_{}'.format(array_size[0], array_size[1], array_size[2], n_theta)
    if not os.path.exists(dest_folder):
        os.mkdir(dest_folder)
    for i, arr in enumerate(coord_old_ls):
        np.save(os.path.join(dest_folder, '{:04}'.format(i)), arr)

    coord1_vec = coord1_vec + image_center[1]
    coord1_vec = np.tile(coord1_vec, array_size[0])
    coord2_vec = coord2_vec + image_center[2]
    coord2_vec = np.tile(coord2_vec, array_size[0])
    for i, coord in enumerate([coord0_vec, coord1_vec, coord2_vec]):
        np.save(os.path.join(dest_folder, 'coord{}_vec'.format(i)), coord)

    return coord_old_ls
Code example #11
def test_mean_linear(linear_data):
    m = Linear(1)
    mp = m(params_only=True)
    mp = m(mp, linear_data['training2'], params_only=True)
    print(mp)

    coef_ = np.round(mp['linear_mean_coef'], 2).tolist()
    assert coef_[0] == 0.45
    assert coef_[1] == -0.63  # bias affected the treatment

    yhat = m(mp, np.array([1, 2, 3]))
    assert np.round(yhat, 8).tolist() == [-0.17919259, 0.266663, 0.7125186]
Code example #12
def get_training_data(data):
    # get all input and output data for training from the csv file
    n = np.size(data, 0)
    n = 2 * n / 3
    n = int(np.round(n))
    train_data = data[:n, :]
    return train_data
Code example #13
File: minimc.py Project: phnascimento2/minimc
def leapfrog(q, p, dVdq, path_len, step_size):
    """Leapfrog integrator for Hamiltonian Monte Carlo.

    Parameters
    ----------
    q : np.floatX
        Initial position
    p : np.floatX
        Initial momentum
    dVdq : callable
        Gradient of the potential energy with respect to position
    path_len : float
        How long to integrate for
    step_size : float
        How long each integration step should be

    Returns
    -------
    q, p : np.floatX, np.floatX
        New position and momentum
    """
    q, p = np.copy(q), np.copy(p)

    p -= step_size * dVdq(q) / 2  # half step
    for _ in np.arange(np.round(path_len / step_size) - 1):
        q += step_size * p  # whole step
        p -= step_size * dVdq(q)  # whole step
    q += step_size * p  # whole step
    p -= step_size * dVdq(q) / 2  # half step

    # momentum flip at end
    return q, -p
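
A usage sketch with a standard normal target, where V(q) = q**2 / 2 so dVdq is simply the identity (a toy choice for illustration; numpy imported as np, as in the source file):

import numpy as np

dVdq = lambda q: q  # gradient of V(q) = 0.5 * q**2

q0, p0 = np.array([1.0]), np.array([0.5])
q_new, p_new = leapfrog(q0, p0, dVdq, path_len=1.0, step_size=0.1)

# leapfrog is approximately energy conserving
H = lambda q, p: 0.5 * q ** 2 + 0.5 * p ** 2
print(H(q0, p0), H(q_new, p_new))  # nearly equal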
Code example #14
 def Accuracy(self, y, y_hat):
     y_hat = np.round(y_hat)
     temp = 0
     for i in range(len(y)):
         if y_hat[i] == y[i]:
             temp += 1
     return temp * 100 / (len(y))
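
The loop above can be collapsed into a single vectorized expression; an equivalent sketch, assuming y and y_hat are 1-D arrays of equal length:

 def Accuracy(self, y, y_hat):
     return np.mean(np.round(y_hat) == y) * 100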
Code example #15
File: uncertainty.py Project: ziyanzzhu/am207
def auc_calc(x_test, y_test, nn, N, perc, model, weightlist=None): 
    ''' 
    Options for model "mc", "bbvi", "ensemble", "deterministic"
    For BBVI, pass a list of weights. 
    '''
    n_test = len(y_test)
    if model != "deterministic": 
        if model == "mc":
            p_mean, entropymean = myentropy(nn, [nn.weights]*N, x_test.T)
        elif (model == "bbvi") and weightlist is not None: 
            p_mean, entropymean = myentropy(nn, weightlist, x_test.T)
        elif model == "ensemble": # deterministic 
            nn_weights = [] 
            for nn_here in nn: 
                nn_weights.append(nn_here.weights) 
            p_mean, entropymean = myentropy(nn, nn_weights, x_test.T)

        idx = np.argsort(entropymean)
        y_test = y_test[idx]
        p_mean = p_mean[idx]
        y_pred_retained = p_mean[0:int(perc*n_test)] # choosing samples with smallest entropy to evaluate 
        y_test_retained = y_test[0:int(perc*n_test)]
        predict_proba = np.round(y_pred_retained)
  
        auc = len(y_test_retained[predict_proba==y_test_retained]) / len(y_pred_retained) * 100
    else: 
        auc = auc_calc_proba(x_test, y_test, nn, N, perc)
        
    return auc
Code example #16
def p1_optimal(
        p1=100,
        p2=100,
        alpha=[1, 0, 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1, 0., 0., 0.],
        n_clients_per_class=[50, 20, 10, 5]):
    '''
    Inputs :
        ------------
        
    Output :
        p1 : the best price found for the first item
        
    '''
    bounds_p1 = so.Bounds(0, np.inf)  # the price of the first item must be positive

    x0 = [p1]

    gradient_obj_fun = grad(obj_fun)

    res = so.minimize(obj_fun,
                      x0=x0,
                      args=(p2, alpha, n_clients_per_class),
                      jac=gradient_obj_fun,
                      bounds=bounds_p1)

    x_sol = res.x

    return np.round(x_sol[0])
Code example #17
def get_test_data_result(data):
    # get all output data for testing from the csv file
    n = np.size(data, 0)
    n = 2 * n / 3
    n = int(np.round(n))
    y = data[n:, -1]
    return y
Code example #18
def classification_data(seed=0):
    """
    Load 2D data. 2 Classes. Class labels generated from a 2-2-1 network.
    :param seed: random number seed
    :return:
    """
    npr.seed(seed)
    data = np.load("./data/2D_toy_data_linear.npz")
    x = data['x']
    y = data['y']
    ids = np.arange(x.shape[0])
    npr.shuffle(ids)
    # 75/25 split
    num_train = int(np.round(0.75 * x.shape[0]))
    x_train = x[ids[:num_train]]
    y_train = y[ids[:num_train]]
    x_test = x[ids[num_train:]]
    y_test = y[ids[num_train:]]
    mu = np.mean(x_train, axis=0)
    std = np.std(x_train, axis=0)
    x_train = (x_train - mu) / std
    x_test = (x_test - mu) / std
    train_stats = dict()
    train_stats['mu'] = mu
    train_stats['sigma'] = std
    return x_train, y_train, x_test, y_test, train_stats
Code example #19
    def __init__(self,
                 Wdict,
                 rate_f=calnet.utils.f_miller_troyer,
                 rate_fprime=calnet.utils.fprime_m_miller_troyer,
                 u_fn=calnet.utils.u_fn_WW):
        # Wmx,Wmy,Wsx,Wsy,s02,k,kappa,XX,XXp,Eta,Xi
        for key in Wdict:
            setattr(self, key, Wdict[key])

        self.rate_f = rate_f
        self.rate_fprime = rate_fprime
        self.u_fn = u_fn

        self.nP = self.Wmx.shape[0]
        self.nQ = self.Wmy.shape[0]
        self.nS = int(np.round(self.Eta.shape[1] / self.nQ))
        self.nT = 1
        self.nN = self.Eta.shape[0]

        wws = ['WWmx', 'WWmy', 'WWsx', 'WWsy']
        ws = ['Wmx', 'Wmy', 'Wsx', 'Wsy']
        for w, ww in zip(ws, wws):
            W = getattr(self, w)
            WW = calnet.utils.gen_Weight_k_kappa(W, self.k, self.kappa)
            setattr(self, ww, WW)

        self.YY = self.compute_f_(self.Eta, self.Xi, self.s02)
        self.resEta = self.Eta - self.u_fn_m(self.XX, self.YY)
        self.resXi = self.Xi - self.u_fn_s(self.XX, self.YY)
Code example #20
def get_metrics(y_real, y_pred):
    for how_many_last in [100, 20, 10]:
        real = y_real[-how_many_last:]
        pred = y_pred[-how_many_last:]
        print('On last {} steps'.format(how_many_last))
        print('test acc', np.mean(real == np.round(pred)))
        print('test auc', roc_auc_score(real, pred))
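
A quick usage sketch with synthetic labels and scores (the numpy and sklearn imports mirror what the snippet's source file presumably provides):

import numpy as np
from sklearn.metrics import roc_auc_score

y_real = np.tile([0, 1], 100)                                 # balanced binary labels
y_pred = np.clip(y_real + 0.4 * np.random.randn(200), 0, 1)   # noisy scores in [0, 1]
get_metrics(y_real, y_pred)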
Code example #21
File: fm_generator.py Project: hasnank/tugasakhir
def step_generator(t, **kwargs):
    '''
    Makes a random piecewise constant step frequency input.  Note 
    that the steps always take on integer values.
    '''
    # lets make random piecewise frequency
    num_steps = 2
    if 'num_steps' in kwargs:
        num_steps = kwargs['num_steps'] - 1

    # pick num_pieces random locations for step ledges
    r = np.random.permutation(len(t))[:num_steps]
    r = np.sort(r)

    # generate random level per step
    levels = np.round(5 * np.random.rand(num_steps + 1)) + 1
    f = np.ones((t.size, 1))

    # set each chunk to appropriate level
    f[:r[0]] *= levels[0]  # set first chunk to appropriate level
    for n in range(1, len(r)):
        f[r[n - 1]:r[n]] *= levels[n]
    f[r[-1]:] *= levels[-1]

    return f
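
A quick usage sketch (numpy imported as np, as in the snippet's source file):

import numpy as np

t = np.linspace(0, 10, 500)
f = step_generator(t, num_steps=4)  # piecewise constant with 4 integer levels
print(np.unique(f))                 # the levels actually used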
Code example #22
File: util.py Project: mdw771/beyond_dof
def realign_image(arr, shift):
    """
    Translate an image, using a circular shift for integer displacements
    and a Fourier shift otherwise.

    Parameters
    ----------
    arr : ndarray
        Image array.

    shift : tuple
        Translation along each axis, in pixels.

    Returns
    -------
    ndarray
        Output array.
    """
    # if both shifts are integers, do circular shift; otherwise perform Fourier shift.
    if np.count_nonzero(np.abs(np.array(shift) - np.round(shift)) < 0.01) == 2:
        temp = np.roll(arr, int(shift[0]), axis=0)
        temp = np.roll(temp, int(shift[1]), axis=1)
        temp = temp.astype('float32')
    else:
        temp = fourier_shift(np.fft.fftn(arr), shift)
        temp = np.fft.ifftn(temp)
        temp = np.abs(temp).astype('float32')
    return temp
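
A usage sketch (fourier_shift comes from scipy.ndimage, which the source file is assumed to import):

import numpy as np
from scipy.ndimage import fourier_shift

img = np.zeros((8, 8), dtype='float32')
img[4, 4] = 1.0

shifted_int = realign_image(img, (1, 0))      # integer shift -> circular roll
shifted_sub = realign_image(img, (0.5, 0.0))  # fractional shift -> Fourier interpolation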
Code example #23
def test_sgp(bspline_data):
    low, high = bspline_data['xlim']
    num_bases = 5
    bsplines_degree = 3
    basis = BSplines(low, high, num_bases, bsplines_degree, boundaries='space')

    n_clusters = 1
    random_basis = np.random.multivariate_normal(np.zeros(num_bases),
                                                 0.1 * np.eye(num_bases),
                                                 n_clusters)

    n_train = bspline_data['n_train']
    truncated_time = bspline_data['truncated_time']

    m = []
    for i in range(n_clusters):
        m.append(LinearWithBsplinesBasis(basis, no=i, init=random_basis[i]))
    tr = []
    tr.append((1.0, Treatment(2.0)))
    mcgp = GP(m, linear_cov(basis), tr, ac_fn=None)
    mcgp.fit(bspline_data['training2'], options={'maxiter': 1})
    print(mcgp.params)

    assert mcgp.params['treatment'].tolist() != [0.0]
    assert np.round(mcgp.params['classes_prob_logit_F'], 0).tolist() == [1.0]
    _test_gp_prediction(mcgp, bspline_data['testing1'][0:20], truncated_time)
Code example #24
File: optimizers.py Project: tonytuoli/mlrefined
def gradient_descent(g, alpha, max_its, w, num_pts, train_portion,**kwargs):    
    # flatten the input function, create gradient based on flat function
    g_flat, unflatten, w = flatten_func(g, w)
    grad = value_and_grad(g_flat)

    # containers for histories
    weight_hist = []
    train_ind_hist = []
    test_ind_hist = []
    
    # store first weights
    weight_hist.append(unflatten(w))
    
    # pick random proportion of training indices
    train_num = int(np.round(train_portion*num_pts))
    inds = np.random.permutation(num_pts)
    train_inds = inds[:train_num]
    test_inds = inds[train_num:]
    
    # record train / test inds
    train_ind_hist.append(train_inds)
    test_ind_hist.append(test_inds)
    
    # main iteration loop
    for k in range(max_its):
        # plug in value into func and derivative
        cost_eval,grad_eval = grad(w,train_inds)
        grad_eval.shape = np.shape(w)

        # take descent step
        w = w - alpha*grad_eval

        # record weight update
        weight_hist.append(unflatten(w))        
        
        #### pick new train / test split ####
        # pick random proportion of training indices
        train_num = int(np.round(train_portion*num_pts))
        inds = np.random.permutation(num_pts)
        train_inds = inds[:train_num]
        test_inds = inds[train_num:]
        
        # record train / test inds
        train_ind_hist.append(train_inds)
        test_ind_hist.append(test_inds)
        
    return weight_hist,train_ind_hist,test_ind_hist
Code example #25
File: source.py Project: Runjing-Liu120/scarlet
    def __init__(self, frame, sky_coord, observations):
        """Source intialized with a single pixel

        Parameters
        ----------
        frame: `~scarlet.Frame`
            The frame of the model
        sky_coord: tuple
            Center of the source
        observations: instance or list of `~scarlet.Observation`
            Observation(s) to initialize this source
        """
        C, Ny, Nx = frame.shape
        self.center = np.array(frame.get_pixel(sky_coord), dtype="float")

        # initialize SED from sky_coord
        try:
            iter(observations)
        except TypeError:
            observations = [observations]

        # determine initial SED from peak position
        # SED in the frame for source detection
        seds = []
        for obs in observations:
            _sed = get_psf_sed(sky_coord, obs, frame)
            seds.append(_sed)
        sed = np.concatenate(seds).reshape(-1)

        if np.any(sed <= 0):
            # If the flux in all channels is  <=0,
            # the new sed will be filled with NaN values,
            # which will cause the code to crash later
            msg = "Zero or negative SED {} at y={}, x={}".format(
                sed, *sky_coord)
            if np.all(sed <= 0):
                logger.warning(msg)
            else:
                logger.info(msg)

        # set up parameters
        sed = Parameter(
            sed,
            name="sed",
            step=partial(relative_step, factor=1e-2),
            constraint=PositivityConstraint(),
        )
        center = Parameter(self.center, name="center", step=1e-1)

        # define bbox
        pixel_center = tuple(np.round(center).astype("int"))
        front, back = 0, C
        bottom = pixel_center[0] - frame.psf.shape[1] // 2
        top = pixel_center[0] + frame.psf.shape[1] // 2
        left = pixel_center[1] - frame.psf.shape[2] // 2
        right = pixel_center[1] + frame.psf.shape[2] // 2
        bbox = Box.from_bounds((front, back), (bottom, top), (left, right))

        super().__init__(frame, sed, center, self._psf_wrapper, bbox=bbox)
Code example #26
def _test_gp_prediction(m, data, truncated_time, exclude_ac=[]):
    _samples = make_predict_samples(data, None, None, truncated_time)
    s = 0.0
    for (y, x), (_y, _x, _x_star) in zip(data, _samples):
        yhat, cov_hat = m.predict(_x_star, _y, _x, exclude_ac)
        _t_star, _rx_star = _x_star
        idx = _t_star > truncated_time
        s += np.sum((yhat - y)[idx]**2) / len(y[idx])

        p_a, p_mix = m.class_posterior(_y, _x, exclude_ac)
        if exclude_ac: assert len(p_a) == 2 - len(exclude_ac)
        assert np.round(np.sum(p_a), 0) == 1.0
        assert np.round(np.sum(p_mix), 0) == 1.0

    mse = s / len(data)
    print(mse)
    assert True
Code example #27
def PrintPerf(Params, iter, _):
    if iter == 0:
        print("     Epoch     |    Train cost  ")
    if iter % 5 == 0:
        Cost = ObjectiveFunWrap(Params, iter)
        Gradient = flatten(ObjectiveGrad(Params, iter))
        print(
            str(iter) + '  ' + str(np.round(Cost, 6)) + '  ' +
            str(np.square(Gradient[0]).sum()))
Code example #28
def MovingWinFeats(x, xLen, fs, winLen, winDisp, featFn):
    # number of full windows of length winLen (s) spaced winDisp (s) apart;
    # cast to int so the sizes and slice bounds are valid indices
    num_wins = int(np.floor((xLen - winLen * fs) / (winDisp * fs)) + 1)
    y = np.zeros(num_wins)
    for i in range(num_wins):
        start = int(np.ceil(winDisp * fs * i))
        end = int(np.ceil(winDisp * fs * i + winLen * fs))
        y[i] = featFn(x[start:end])
    return y
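
A usage sketch of the repaired window extractor; np.mean as featFn and the 100 Hz rate are arbitrary choices for illustration:

import numpy as np

fs = 100.0                                        # sampling rate in Hz
x = np.sin(2 * np.pi * np.arange(0, 2, 1 / fs))   # 2 s test signal
feats = MovingWinFeats(x, len(x), fs, winLen=0.5, winDisp=0.25, featFn=np.mean)
print(feats.shape)                                # (7,): 0.5 s windows every 0.25 s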
Code example #29
    def draw_fit_trainval(self,ax,run,plot_fit):
        # set plotting limits
        xmax = np.max(copy.deepcopy(self.x))
        xmin = np.min(copy.deepcopy(self.x))
        xgap = (xmax - xmin)*0.1
        xmin -= xgap
        xmax += xgap

        ymax = np.max(copy.deepcopy(self.y))
        ymin = np.min(copy.deepcopy(self.y))
        ygap = (ymax - ymin)*0.3
        ymin -= ygap
        ymax += ygap    

        ####### plot total model on original dataset #######
        # scatter original data - training and validation sets
        train_inds = run.train_inds
        valid_inds = run.val_inds
        ax.scatter(self.x[:,train_inds],self.y[:,train_inds],color = self.colors[1],s = 40,edgecolor = 'k',linewidth = 0.9)
        ax.scatter(self.x[:,valid_inds],self.y[:,valid_inds],color = self.colors[0],s = 40,edgecolor = 'k',linewidth = 0.9)
        
        if plot_fit:
            # plot fit on residual
            s = np.linspace(xmin,xmax,2000)[np.newaxis,:]

            # plot total fit
            t = 0

            # get current run
            cost = run.cost
            model = run.model
            feat = run.feature_transforms
            normalizer = run.normalizer
            cost_history = run.train_cost_histories[0]
            weight_history = run.weight_histories[0]

            # get best weights                
            win = np.argmin(cost_history)
            w = weight_history[win]        
            t = model(normalizer(s),w)

            ax.plot(s.T,t.T,linewidth = 4,c = 'k')
            ax.plot(s.T,t.T,linewidth = 2,c = 'r')
            
            lam = run.lam
            ax.set_title( 'lam = ' + str(np.round(lam,2)) + ' and fit to original',fontsize = 14)

        if not plot_fit:
            ax.set_title('test',fontsize = 14,color = 'w')

        ### clean up panels ###             
        ax.set_xlim([xmin,xmax])
        ax.set_ylim([ymin,ymax])
        
        # label axes
        ax.set_xlabel(r'$x$', fontsize = 14)
        ax.set_ylabel(r'$y$', rotation = 0,fontsize = 14,labelpad = 15)
Code example #30
 def split_data(self, folds):
     # split data into k equal (as possible) sized sets
     L = np.size(self.y)
     order = np.random.permutation(L)
     c = np.ones((L, 1))
     L = int(np.round((1 / folds) * L))
     for s in np.arange(0, folds - 2):
         c[order[s * L:(s + 1) * L]] = s + 2
     c[order[(folds - 1) * L:]] = folds
     return c
Code example #31
File: test_binary_ops.py Project: shuangao/autograd
def test_pow():
    fun = lambda x, y : to_scalar(x ** y)
    d_fun_0 = lambda x, y : to_scalar(grad(fun, 0)(x, y))
    d_fun_1 = lambda x, y : to_scalar(grad(fun, 1)(x, y))
    make_positive = lambda x : np.abs(x) + 1.1 # Numeric derivatives fail near zero
    for arg1, arg2 in arg_pairs():
        arg1 = make_positive(arg1)
        arg2 = np.round(arg2)
        check_grads(fun, arg1, arg2)
        check_grads(d_fun_0, arg1, arg2)
        check_grads(d_fun_1, arg1, arg2)
Code example #32
File: mniw.py Project: WuCPMark/svae
    def sample_invwishart(S, nu):
        n = S.shape[0]
        chol = np.linalg.cholesky(S)

        if (nu <= 81 + n) and (nu == np.round(nu)):
            x = npr.randn(nu, n)
        else:
            x = np.diag(np.sqrt(np.atleast_1d(chi2.rvs(nu - np.arange(n)))))
            x[np.triu_indices_from(x, 1)] = npr.randn(n*(n-1)//2)
        R = np.linalg.qr(x, 'r')
        T = solve_triangular(R.T, chol.T, lower=True).T
        return np.dot(T, T.T)
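
A usage sketch (assuming the imports the source file uses: numpy as np, numpy.random as npr, scipy.stats.chi2, and scipy.linalg.solve_triangular); a draw should come back symmetric positive definite:

import numpy as np

S = np.eye(3)       # scale matrix
nu = 5.0            # degrees of freedom
W = sample_invwishart(S, nu)
print(np.allclose(W, W.T), np.all(np.linalg.eigvalsh(W) > 0))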
Code example #33
File: util.py Project: cpehle/diffmem
def toArray(dic,h,w):
  """
  Convert dicts of arrays to arrays for the visualize function.
  """
  outList = []
  for k in dic:
    if k != -1:
      outList.append(dic[k].tolist())
  outC = np.round(np.array(outList),2)
  outC = outC.T
  out = np.reshape(outC,(h,w))
  return out
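
A small usage sketch; the -1 key is skipped by design, and the remaining rows are rounded to 2 decimals and reshaped to (h, w):

import numpy as np

dic = {0: np.array([0.123, 0.456]),
       1: np.array([0.789, 0.101]),
       -1: np.array([9.9, 9.9])}  # sentinel entry, ignored by toArray
print(toArray(dic, 2, 2))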
Code example #34
File: fits_image.py Project: HIPS/DESI-MCMC
    def __init__(self, band,
            filename           = None,
            fits_file_template = None,
            timg               = None,
            exposure_num       = 0,
            calib              = None,
            gain               = None,
            darkvar            = None,
            sky                = None,
            frame              = None, 
            fits_table         = None):
        self.band      = band
        if fits_file_template:
            self.band_file = fits_file_template%band
            self.img       = fitsio.FITS(self.band_file)[exposure_num].read()
            header         = fitsio.read_header(self.band_file, ext=exposure_num)
        elif filename is not None:
            self.band_file = filename
            self.img       = fitsio.FITS(self.band_file)[exposure_num].read()
            header         = fitsio.read_header(self.band_file, ext=exposure_num)
        elif timg:
            self.band_file = None
            self.img       = timg[0].getImage()
            header         = timg[1]['hdr']
            self.timg      = timg[0]
            self.invvar    = self.timg.getInvvar()
        else:
            pass

        self.header = header
        self.frame  = frame
        self.fits_table = fits_table

        # Compute the number of electrons, resource: 
        # http://data.sdss3.org/datamodel/files/BOSS_PHOTOOBJ/frames/RERUN/RUN/CAMCOL/frame.html
        # (Neither of these look like integers)
        if fits_file_template or filename:
            self.dn    = self.img / header["CALIB"] + header["SKY"]
            self.nelec = np.round(self.dn * header["GAIN"])
        else:
            # TODO(awu): what are CALIB and GAIN?
            self.dn    = self.img / calib + sky #timg[0].getSky().val
            self.nelec = np.round(self.dn * gain)

        # make nelec immutable - it is constant data!!
        self.nelec.flags.writeable = False
        self.shape = self.nelec.shape
        self.pixel_grid = self.make_pixel_grid()  # keep pixel grid around

        # reference points
        # TODO: Does CRPIX1 refer to the first axis of self.img ?? 
        self.rho_n = np.array([header['CRPIX1'], header['CRPIX2']]) - 1  # PIXEL REFERENCE POINT (fits stores it 1-based indexing)
        self.phi_n = np.array([header['CRVAL1'], header['CRVAL2']])     # EQUA REFERENCE POINT
        self.Ups_n = np.array([[header['CD1_1'], header['CD1_2']],      # MATRIX takes you into EQUA TANGENT PLANE
                               [header['CD2_1'], header['CD2_2']]])
        self.Ups_n_inv = np.linalg.inv(self.Ups_n)

        #astrometry wcs object for "exact" x,y to equa ra,dec conversion
        import astropy.wcs as wcs
        self.wcs = wcs.WCS(self.header)
        self.use_wcs = False

        # set image specific KAPPA and epsilon 
        if fits_file_template:
            self.kappa    = header['GAIN']     # TODO is this right??
            self.epsilon  = header['SKY'] * self.kappa # background rate
            self.epsilon0 = self.epsilon      # background rate copy (for debugging)
            self.darkvar  = header['DARKVAR']  # also eventually contributes to mean?
            self.calib    = header['CALIB']    # dn = nmaggies / calib, calib is NMGY
        else:
            self.kappa = gain
            self.epsilon = timg[0].sky.val * self.kappa
            self.epsilon0 = self.epsilon
            self.darkvar = darkvar
            self.calib = calib

        # point spread function
        if fits_file_template:
            psfvec       = [header['PSF_P%d'%i] for i in range(18)]
        else:
            psfvec       = [psf for psf in timg[0].getPsf()]

        self.weights = np.array(psfvec[0:3])
        self.means   = np.array(psfvec[3:9]).reshape(3, 2)  # one comp mean per row
        covars       = np.array(psfvec[9:]).reshape(3, 3)   # [var_k(x), var_k(y), cov_k(x,y)] per row
        self.covars  = np.zeros((3, 2, 2))
        self.invcovars = np.zeros((3, 2, 2))
        self.logdets   = np.zeros(3)
        for i in range(3):
            self.covars[i,:,:]    = np.array([[ covars[i,0],  covars[i,2]],
                                              [ covars[i,2],  covars[i,1]]])

            # cache inverse covariance 
            self.invcovars[i,:,:] = np.linalg.inv(self.covars[i,:,:])

            # cache log determinant
            sign, logdet = np.linalg.slogdet(self.covars[i,:,:])
            self.logdets[i] = logdet

        self.psf_mog = MixtureOfGaussians(means = self.means, covs = self.covars, pis = self.weights)

        # for a point source in this image, calculate the radius such that 
        # at least 99% of photons from that source will fall within
        ERROR = 0.001
        self.R = calc_bounding_radius(self.weights,
                                      self.means,
                                      self.covars,
                                      ERROR)
Code example #35
File: test_scipy.py Project: ericmjl/autograd
 def test_poisson_logpmf_broadcast(): combo_check(stats.poisson.logpmf, [1])([np.round(R(4, 3)**2)], [R(4, 1)**2 + 1.1])
 def test_poisson_pmf_broadcast():    combo_check(stats.poisson.pmf,    [1])([np.round(R(4, 3)**2)], [R(4, 1)**2 + 1.1])
Code example #36
File: test_scipy.py Project: ericmjl/autograd
 def test_poisson_logpmf(): combo_check(stats.poisson.logpmf, [1])([np.round(R(4)**2)], [R(4)**2 + 1.1])
 def test_poisson_pmf():    combo_check(stats.poisson.pmf,    [1])([np.round(R(4)**2)], [R(4)**2 + 1.1])
Code example #37
File: test_scipy.py Project: ericmjl/autograd
    def test_poisson_pmf_broadcast():    combo_check(stats.poisson.pmf,    [1])([np.round(R(4, 3)**2)], [R(4, 1)**2 + 1.1])

    def test_t_pdf():    combo_check(stats.t.pdf,    [0,1,2,3])([R(4)], [R(4)**2 + 2.1], [R(4)], [R(4)**2 + 2.1])