Example #1
    def evaluate(self, X, Y, evaluation_type='regression'):

        predicted_y = self.predict(X)

        result = {}

        result['error'] = numpy.mean(numpy.linalg.norm(predicted_y - Y,
                                                       axis=1))

        if 'classification' in evaluation_type:

            correct = 0
            incorrect = 0

            if self.last_outgoing_dimension == 1:

                for py, y in zip(predicted_y, Y):
                    if (numpy.round(py) == numpy.round(y)):
                        correct += 1
                    else:
                        incorrect += 1

            elif self.last_outgoing_dimension > 1:

                for py, y in zip(predicted_y, Y):

                    if (numpy.argmax(py) == numpy.argmax(y)):
                        correct += 1
                    else:
                        incorrect += 1

            result['accuracy'] = correct / (correct + incorrect)

        return result
Example #2
    def fit(self, X, y, X_valid, y_valid):
        for epoch in range(self.num_of_epochs):
            print("epoch number: " + str(epoch + 1))
            permuted_indices = np.random.permutation(X.shape[0])
            for i in range(0, X.shape[0], self.batch_size):
                selected_data_points = np.take(permuted_indices,
                                               range(i, i + self.batch_size),
                                               mode='wrap')
                delta_w = self._d_cost(X[selected_data_points],
                                       y[selected_data_points], self.weights)
                self.weights -= delta_w * self.learning_rate

            training_accuracy = compute_accuracy(self.predict(X),
                                                 np.argmax(y, 1))
            validation_accuracy = compute_accuracy(self.predict(X_valid),
                                                   np.argmax(y_valid, 1))

            print("training accuracy: " + str(round(training_accuracy, 2)))
            print("validation accuracy: " + str(round(validation_accuracy, 2)))

            print("cost: " + str(self._cost(X, y, self.weights)))

            if self.validation_accuracy < validation_accuracy:
                self.validation_accuracy = validation_accuracy
                self.old_weights = self.weights.copy()  # keep a copy; the in-place updates above would otherwise alias it
            else:
                self.weights = self.old_weights.copy()  # roll back to the best weights seen so far
                self.learning_rate = 0.5 * self.learning_rate
Example #3
def gradient_descent_class(g, alpha, max_its, w, x, y):
    """
    g: cost function to minimize
    alpha: step size
    max_its: maximum number of iterations
    w: starting weights (often randomized)
    x: input data
    y: class labels
    """
    # create gradient calculator function for input function
    gradient = grad(g)
    # initial conditions calculations
    weight_history = [w]  # weight history container
    cost_history = [g(w)]  # cost function history container
    y_model = np.argmax(model(x, w), axis=0)[np.newaxis, :]
    misclass_history = [np.sum(y_model != y)]
    balanced_acc_history = [balanced_accuracy(y, y_model)]
    # gradient descent loop
    # (with progress bar using tqdm)
    for k in tqdm(range(max_its), desc="gradient step iteration"):
        # eval gradient
        grad_eval = gradient(w)
        # gradient descent step
        w = w - alpha * grad_eval
        # record weight, cost, misclassifications, balanced accuracy
        weight_history.append(w)
        cost_history.append(g(w))
        y_model = np.argmax(model(x, w), axis=0)[np.newaxis, :]
        misclass_history.append(np.sum(y_model != y))
        balanced_acc_history.append(balanced_accuracy(y, y_model))

    return (np.array(weight_history), np.array(cost_history),
            np.array(misclass_history), np.array(balanced_acc_history))
Example #4
def GetVideoTimeSeries():
    TS = []
    VIS_RY = []
    VIS_RX = []
    t0 = None
    lasthsum = None
    lastvsum = None
    hprior, vprior = 0, 0
    for msg in parse.ParseLog(open('../rustlerlog-BMPauR')):
        if msg[0] == 'img':
            _, ts, im = msg
            im = GammaCorrect(im)
            hsum = np.sum(im, axis=0)
            hsum -= np.mean(hsum)
            vsum = np.sum(im, axis=1)
            vsum -= np.mean(vsum)
            if t0 is None:
                t0 = ts
                lasthsum = hsum
                lastvsum = vsum
            hoffset = np.argmax(
                -2*np.arange(-80 - hprior, 81 - hprior)**2 +
                np.correlate(lasthsum, hsum[80:-80], mode='valid')) - 80
            voffset = np.argmax(
                -2*np.arange(-60 - vprior, 61 - vprior)**2 +
                np.correlate(lastvsum, vsum[60:-60], mode='valid')) - 60
            TS.append(ts - t0)
            VIS_RY.append(hoffset)
            VIS_RX.append(voffset)
            hprior, vprior = hoffset, voffset
            lasthsum = hsum
            lastvsum = vsum
    return TS, VIS_RY, VIS_RX
Example #5
def q18():
    X, y = shuffle(X_trn, y_trn)
    split_ind = int(len(X) / 2)
    train_x, train_y = X[:split_ind], y[:split_ind]
    test_x, test_y = X[split_ind:], y[split_ind:]

    gen_errors = {}

    for M in [5, 40, 70]:
        _losses, W, V, b, c = train(train_x, train_y, M)
        misses = 0
        for i in range(len(test_x)):
            x = test_x[i]
            y = test_y[i]
            f = c + V.dot(np.tanh(b + W.dot(x)))
            pred_y = np.argmax(f)
            if pred_y != y:
                misses += 1
        gen_errors[str(M)] = misses / len(test_y)

    print(gen_errors)

    _losses, W, V, b, c = train(X_trn, y_trn, 40)
    preds = []

    for X in X_tst:
        f = c + V.dot(np.tanh(b + W.dot(X)))
        pred = np.argmax(f)
        preds.append(pred)

    write_csv(preds, 'q18.csv')
Example #6
 def plt_accuracy(self, lamda_choice, weights_initial):
     """Draw accuracy corresponding to train and test data"""
     weights_random = []
     train_accuracy = []
     test_accuracy = []
     for i in range(len(lamda_choice)):
         self.lamda = lamda_choice[i]
         self.fit(weights_initial)
         pred_result = self.predict()
         test_ac = self.evaluate(pred_result)
         test_accuracy.append(test_ac)
         train_ac = self.train_accuracy()
         train_accuracy.append(train_ac)
         weights_random.append(self.w_result[-1])
     # print best lamda with highest accuracy
     print("choose lambda: %f" % lamda_choice[np.argmax(train_accuracy)])
     labels = ["Train_accuracy", "Test_accuracy"]
     fig, ax = plt.subplots()
     ax.plot(lamda_choice, train_accuracy, 'o-', label='Train_accuracy')
     ax.plot(lamda_choice, test_accuracy, 'o-', label='Test_accuracy')
     plt.xlabel('lambda choice')
     plt.ylabel('accuracy')
     # Draw absolute weight value corresponding the random feature
     fig, ax = plt.subplots()
     ax.plot(lamda_choice, weights_random, label='Weight for random')
     plt.xlabel('lambda_choice')
     plt.ylabel('weight_random')
     plt.show()
     # find the best lamda
     self.lamda = lamda_choice[np.argmax(train_accuracy)]
Example #7
def logistic_loss_batch(weights, x, y, unflatten):
    # regularization penalty
    lambda_pen = 10

    # unflatten weights into W, b, V and c respectively
    (W, b, V, c) = unflatten(weights)

    # Predict output for the entire train data
    out = feedForward(W, b, V, c, x)
    pred = np.argmax(out, axis=1)

    # True labels
    true = np.argmax(y, axis=1)
    # Mean classification error
    class_err = np.mean(pred != true)

    # Computing logistic loss with l2 penalization
    logistic_loss = np.sum(-np.sum(out * y, axis=1) + np.log(
        np.sum(np.exp(out), axis=1))) + lambda_pen * np.sum(weights**2)
    #logistic_loss = np.sum(np.sum(np.log(1+np.exp(-1*out*y)), axis=1)) + lambda_pen * np.sum(weights**2)

    # returning loss. Note that outputs can only be returned in the below format
    return (logistic_loss, [
        autograd.util.getval(logistic_loss),
        autograd.util.getval(class_err)
    ])
Example #8
    def fit(self, X, y, X_valid, y_valid):
        for epoch in range(self.num_of_epochs):
            print("epoch number: " + str(epoch + 1))
            permuted_indices = np.random.permutation(X.shape[0])
            for i in range(0, X.shape[0], self.batch_size):
                selected_data_points = np.take(permuted_indices, range(i, i+self.batch_size), mode='wrap')
                delta_w = self._d_cost(X[selected_data_points], y[selected_data_points], self.weights)
                for w, d in zip(self.weights, delta_w):
                    w -= d*self.learning_rate
                for i in range(len(self.weights)):
                    self.ema_weights[i] = self.ema_weights[i]*self.ema + self.weights[i]*(1-self.ema)

            training_accuracy = compute_accuracy(self.predict(X, self.weights), np.argmax(y, 1))
            validation_accuracy = compute_accuracy(self.predict(X_valid, self.weights), np.argmax(y_valid, 1))

            print("training accuracy: " + str(round(training_accuracy, 2)))
            print("validation accuracy: " + str(round(validation_accuracy, 2)))

            print("cost: " + str(self._cost(X, y, self.weights)))

            training_accuracy = compute_accuracy(self.predict(X, self.ema_weights), np.argmax(y, 1))
            validation_accuracy = compute_accuracy(self.predict(X_valid, self.ema_weights), np.argmax(y_valid, 1))

            print("training accuracy ema: " + str(round(training_accuracy, 2)))
            print("validation accuracy ema: " + str(round(validation_accuracy, 2)))

            self.save_average_and_std(X)
Example #9
    def viterbi(self, obs, act=None):
        loginit, logtrans, logobs = self.log_likelihoods(obs, act)

        delta = []
        z = []
        for _logobs, _logtrans in zip(logobs, logtrans):
            T = _logobs.shape[0]

            _delta = np.zeros((T, self.nb_states))
            _args = np.zeros((T, self.nb_states), np.int64)
            _z = np.zeros((T, ), np.int64)

            for t in range(T - 2, -1, -1):
                _aux = _logtrans[t, :] + _delta[t + 1, :] + _logobs[t + 1, :]
                _delta[t, :] = np.max(_aux, axis=1)
                _args[t + 1, :] = np.argmax(_aux, axis=1)

            _z[0] = np.argmax(loginit + _delta[0, :] + _logobs[0, :], axis=0)
            for t in range(1, T):
                _z[t] = _args[t, _z[t - 1]]

            delta.append(_delta)
            z.append(_z)

        return delta, z
Example #10
def GetVideoTimeSeries():
    TS = []
    VIS_RY = []
    VIS_RX = []
    t0 = None
    lasthsum = None
    lastvsum = None
    hprior, vprior = 0, 0
    for msg in parse.ParseLog(open('../rustlerlog-BMPauR')):
        if msg[0] == 'img':
            _, ts, im = msg
            im = GammaCorrect(im)
            hsum = np.sum(im, axis=0)
            hsum -= np.mean(hsum)
            vsum = np.sum(im, axis=1)
            vsum -= np.mean(vsum)
            if t0 is None:
                t0 = ts
                lasthsum = hsum
                lastvsum = vsum
            hoffset = np.argmax(
                -2 * np.arange(-80 - hprior, 81 - hprior)**2 +
                np.correlate(lasthsum, hsum[80:-80], mode='valid')) - 80
            voffset = np.argmax(
                -2 * np.arange(-60 - vprior, 61 - vprior)**2 +
                np.correlate(lastvsum, vsum[60:-60], mode='valid')) - 60
            TS.append(ts - t0)
            VIS_RY.append(hoffset)
            VIS_RX.append(voffset)
            hprior, vprior = hoffset, voffset
            lasthsum = hsum
            lastvsum = vsum
    return TS, VIS_RY, VIS_RX
Example #11
def accuracy(params, inputs, targets):
    target_class = np.argmax(targets, axis=1)
    print('Targets')
    print(targets)
    predicted_class = np.argmax(neural_net_predict(params, inputs), axis=1)
    print('neural_net_predict(params, inputs)')
    print(neural_net_predict(params, inputs))
    return np.mean(predicted_class == target_class)
Example #12
def calculate_accuracy(weights, bias, x, y):
    '''
    given the minimizing weights and bias, compute the accuracy for x_valid and y_valid
    '''
    Fhat = np.exp(forward_pass(weights[0], weights[1], weights[2], bias[0], bias[1], bias[2], x))
    y_pred = np.argmax(Fhat, axis=1)
    y = np.argmax(y, axis=1)
    return (y_pred == y).sum() / len(y)
Example #13
    def __solve_local_robustness(self, model, spec, display):
        x0 = np.array(ast.literal_eval(read(spec['x0'])))
        y0 = np.argmax(model.apply(x0), axis=1)[0]

        res, x = self.__solve_robustness(model, spec, x0, y0)

        if not res and display:
            y = np.argmax(model.apply(x), axis=1)[0]
            display.show(model, x0, y0, x, y)
Example #14
def best():
    ans = nnm.answer(testing_images)
    correct = (np.argmax(ans, 1) == np.argmax(testing_labels, 1))
    for digit in range(10):
        idx = np.argmax(ans[correct][:, digit])
        plt.subplot(2, 5, digit + 1)
        plt_image(testing_images[correct][idx].reshape([28, 28]))
        print(digit, ans[correct][idx][digit])
    plt.show()
Example #15
def prediction_accuracy(data, labels, theta):
    accuracy = 0
    for i in range(len(data)):
        prob_arr = log_bernoulli_prod(data[i], theta)
        pred = np.argmax(prob_arr)
        target = np.argmax(labels[i])
        if pred == target:
            accuracy += 1
    return np.divide(accuracy, len(data))
Example #16
def best():
    ans = nnm.answer(testing_images)
    correct = (np.argmax(ans, 1) == np.argmax(testing_labels, 1))
    for digit in range(10):
        idx = np.argmax(ans[correct][:, digit])
        plt.subplot(2, 5, digit+1)
        plt_image(testing_images[correct][idx].reshape([28, 28]))
        print(digit, ans[correct][idx][digit])
    plt.show()
Example #17
    def avg_pred_acc(W, X, t):
        # compute the average prediction accuracy of the MAP estimate
        W_reshape = W.T.reshape(784, 10, 100)

        z = np.tensordot(X, W_reshape, axes=1)
        sf_sum = logsumexp(z, axis=1, keepdims=True)

        softmax = z - np.hstack([sf_sum for i in range(10)])
        softmax_avg = softmax.mean(axis=2)

        return np.mean(np.argmax(softmax_avg, axis=1) == np.argmax(t, axis=1))
Example #18
    def frac_err(W_vect, X, T):
        stimuli = ['none', 'LF', 'LH', 'RF', 'RH', 'T']

        y_test = np.argmax(T, axis=1)
        y_hat = np.argmax(pred_fun(W_vect, X), axis=1)

        C_mat = confusion_matrix(y_test, y_hat)
        C = pd.DataFrame(C_mat, index=stimuli, columns=stimuli)

        print(C)
        return np.mean(y_test != y_hat)
Example #19
def prediction_accuracy(data, labels, theta):
    accuracy = 0
    for i in range(len(data)):
        denominator = []
        for j in weights:
            denominator.append(np.dot(j, data[i]))
        pred = np.argmax(denominator)
        target = np.argmax(labels[i])
        if pred == target:
            accuracy += 1
    return np.divide(accuracy, len(data))
Example #20
 def _predict_next_obs(self, obs_seq):
     """
     the index of the most probable next observation
     """
     samples = create_xset_onebitflip(obs_seq[-1:][0])
     log_prob_states = self._hmm.predict_xnp1(data=obs_seq, x=samples)
     # get max of pre_states
     max_idx = np.argmax(log_prob_states)
     if max_idx == len(log_prob_states) - 1:
         return np.argmax(log_prob_states[:len(log_prob_states) - 1])
     else:
         return max_idx
Example #21
def plot_posterior_spikes(q, model, ys, us, tr=0):

    q_lem_x = q.mean_continuous_states[tr]
    # J_diag = q._params[tr]["J_diag"]
    # J_lower_diag= q._params[tr]["J_lower_diag"]
    # J = blocks_to_full(J_diag, J_lower_diag)
    # Jinv = np.linalg.inv(J)
    # q_lem_std = np.sqrt(np.diag(Jinv))

    q_z = q.mean_discrete_states[tr]
    q_lem_z = np.argmax(q_z, axis=1)
    # q_lem_z = model.most_likely_states(q_lem_x, ys[tr])

    f, (a0, a1, a2) = plt.subplots(3,
                                   1,
                                   gridspec_kw={'height_ratios': [1, 3.5, 1]},
                                   figsize=[8, 6])

    yhat = model.smooth(q_lem_x, ys[tr], input=us[tr])
    # zhat = model.most_likely_states(q_lem_x, ys[tr], input=us[tr])
    zhat = np.argmax(q.mean_discrete_states[tr], axis=1)
    a0.imshow(np.row_stack((zs[tr], zhat)), aspect="auto", vmin=0, vmax=1)
    a0.set_xticks([])
    a0.set_yticks([0, 1])
    a0.set_yticklabels(["$z$", r"$\hat{z}$"])
    a0.set_xlim([0, ys[tr].shape[0] - 1])
    # a0.axis("off")
    a1.plot(xs[tr], 'b', label="true")
    a1.plot(q_lem_x, 'k', label="inferred")
    # a1.fill_between(np.arange(np.shape(ys[tr])[0]),(q_lem_x-q_lem_std*2.0)[:,0], (q_lem_x+q_lem_std*2.0)[:,0], facecolor='k', alpha=0.3)
    for j in range(5):
        x_sample = q.sample_continuous_states()[tr]
        a1.plot(x_sample, 'k', alpha=0.3)
    a1.plot(np.array([0, np.shape(ys[tr])[0]]),
            np.array([1.0, 1.0]),
            'k--',
            linewidth=1)
    a1.set_ylim([-0.1, 1.1])
    a1.set_ylabel("$x$")
    a1.set_xlim([0, ys[tr].shape[0] - 1])
    a1.legend()
    a2.set_ylabel("$y$")
    for n in range(ys[tr].shape[1]):
        a2.eventplot(np.where(ys[tr][:, n] > 0)[0],
                     linelengths=0.5,
                     lineoffsets=1 + n,
                     color='k')
    sns.despine()
    a2.set_yticks([])
    a2.set_xlim([0, ys[tr].shape[0] - 1])
    plt.tight_layout()
    plt.show()
Example #22
def accuracy(params, input, labels):
    '''
    Params: params - parameters of the word-embedding neural network.
            input - encoded word on which the model performs its prediction.
            labels - labels associated with the word (here the neighbouring words of the input word).

    Output: accuracy metric showing how well the model performs on the given input.

    '''
    prediction = np.argmax(predict(params, input), axis=1)
    target = np.argmax(labels, axis=1)
    # fraction of predictions that equal the target, i.e. the accuracy
    return np.mean(prediction == target)
Example #23
def accuracy(params, inputs, targets):
    target_class = np.argmax(targets, axis=1)
    predicted_class = np.argmax(mask_predict(params, inputs), axis=1)
    #return np.mean(predicted_class == target_class)
    min_class = min(target_class)
    max_class = max(target_class)
    num_class = max_class - min_class + 1
    acc = 0
    # include max_class and average over classes so this is a per-class (balanced) accuracy
    for c in range(min_class, max_class + 1):
        c_index = np.where(target_class == c)[0]
        c_target = target_class[c_index]
        c_pred = predicted_class[c_index]
        acc = acc + np.mean(c_pred == c_target)
    return acc / num_class
Example #24
def fit_negative_binomial_integer_r(xs, r_min=1, r_max=20):
    """
    Fit a negative binomial distribution NB(r, p) to data xs,
    under the constraint that the shape r is an integer.

    The durations are 1 + a negative binomial random variable.
    """
    assert isinstance(xs, np.ndarray) and xs.ndim == 1 and xs.min() >= 1
    xs -= 1
    N = len(xs)
    x_sum = np.sum(xs)

    p_star = lambda r: np.clip(x_sum / (N * r + x_sum), 1e-8, 1 - 1e-8)

    def nb_marginal_likelihood(r):
        # Compute the log likelihood of data with shape r and
        # MLE estimate p = sum(xs) / (N*r + sum(xs))
        ll = np.sum(gammaln(xs + r)) - np.sum(gammaln(xs + 1)) - N * gammaln(r)
        ll += np.sum(xs * np.log(p_star(r))) + N * r * np.log(1 - p_star(r))
        return ll

    # Search for the optimal r. If variance of xs exceeds the mean, the MLE exists.
    rs = np.arange(r_min, r_max + 1)
    mlls = [nb_marginal_likelihood(r) for r in rs]
    r_star = rs[np.argmax(mlls)]

    return r_star, p_star(r_star)
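A quick smoke test for the fitter above (not from the source; it assumes numpy is available as np, as in the surrounding examples, and that fit_negative_binomial_integer_r and gammaln are in scope). Durations are generated as 1 + a negative-binomial draw so the assertion xs.min() >= 1 holds; numpy's negative_binomial uses its own parameterization of p, which need not match the p returned by the fit.

# hedged usage sketch: generate integer durations and recover an integer shape r
np.random.seed(0)
xs = 1.0 + np.random.negative_binomial(5, 0.4, size=2000)
r_hat, p_hat = fit_negative_binomial_integer_r(xs, r_min=1, r_max=20)
print(r_hat, p_hat)  # r_hat is an integer in [1, 20], p_hat the matching MLE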
Example #25
def plot_most_likely_dynamics(model,
    xlim=(-4, 4), ylim=(-3, 3), nxpts=20, nypts=20,
    alpha=0.8, ax=None, figsize=(3, 3)):
    
    K = model.K
    assert model.D == 2
    x = np.linspace(*xlim, nxpts)
    y = np.linspace(*ylim, nypts)
    X, Y = np.meshgrid(x, y)
    xy = np.column_stack((X.ravel(), Y.ravel()))

    # Get the probability of each state at each xy location
    z = np.argmax(xy.dot(model.transitions.Rs.T) + model.transitions.r, axis=1)

    if ax is None:
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111)

    for k, (A, b) in enumerate(zip(model.dynamics.As, model.dynamics.bs)):
        dxydt_m = xy.dot(A.T) + b - xy

        zk = z == k
        if zk.sum(0) > 0:
            ax.quiver(xy[zk, 0], xy[zk, 1],
                      dxydt_m[zk, 0], dxydt_m[zk, 1],
                      color=colors[k % len(colors)], alpha=alpha)

    ax.set_xlabel('$x_1$')
    ax.set_ylabel('$x_2$')

    plt.tight_layout()

    return ax
Example #26
def fix_phases(mode, *args):
    """Returns the row (or column) phase fixed versions of tmat depending on
    the mode.

    :param mode:
        'rows_by_first': the first column is made real
        'cols_by_first': the first row is made real
        'rows_by_max': the maximum-modulus element of each row is made real
        'cols_by_max': the maximum-modulus element of each column is made real
    :param *args: Transfer matrices to be phase-fixed. For the max.
        element-modes the first one is taken for choosing the max. elements
        and the other ones are phase-fixed to the same elements.
    :returns: List of normalized version of tmats

    TODO More pythonic
    """
    if mode == 'cols_by_first':
        sel = np.zeros(args[0].shape[0], dtype=int)
    elif mode == 'rows_by_first':
        return [x.T for x in fix_phases('cols_by_first', *(x.T for x in args))]

    elif mode == 'cols_by_max':
        sel = np.argmax(np.abs(args[0]), axis=0)
    elif mode == 'rows_by_max':
        return [x.T for x in fix_phases('cols_by_max', *(x.T for x in args))]

    else:
        raise ValueError("{} is not a valid mode.".format(mode))

    phase = lambda x: x / np.abs(x)
    return [tmat / phase(sel.choose(tmat))[None, :] for tmat in args]
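A small sanity check for fix_phases (not from the source; assumes numpy as np and a square complex matrix so the row/column selector shapes line up). After fixing columns by their first element, the first row should come out real.

# hedged usage sketch
tmat = np.random.randn(4, 4) + 1j * np.random.randn(4, 4)
fixed, = fix_phases('cols_by_first', tmat)
print(np.allclose(fixed[0].imag, 0.0))  # True: the first row of the returned matrix is real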
Example #27
    def choose_next_point(domain_min,
                          domain_max,
                          acquisition_function,
                          num_tries=15,
                          rs=npr.RandomState(0)):
        """Uses gradient-based optimization to find next query point."""
        init_points = rs.rand(num_tries, D) * \
            (domain_max - domain_min) + domain_min

        grad_obj = value_and_grad(lambda x: -acquisition_function(x))

        def optimize_point(init_point):
            print('.', end='')
            result = minimize(grad_obj,
                              x0=init_point,
                              jac=True,
                              method='L-BFGS-B',
                              options={'maxiter': 10},
                              bounds=list(zip(domain_min, domain_max)))
            return result.x, acquisition_function(result.x)

        optimized_points, optimized_values = list(
            zip(*list(map(optimize_point, init_points))))
        print()
        best_ix = np.argmax(optimized_values)
        return np.atleast_2d(optimized_points[best_ix])
Example #28
        def print_solution(problem, params):
            steps, initial_features = problem
            features = initial_features
            fake_score = np.zeros(features[0].shape[0])

            string = ""

            for i in range(features[0].shape[0]):
                print([feature[i, 0] for feature in features])

            task = 0
            for mask, utility_mask in steps:
                for i in range(utility_mask.shape[0]):
                    if utility_mask[i] > 0:
                        string += " {}] ".format(i)

                scores = model(np.concatenate(features, axis=1),
                               params).reshape((-1,))
                fake_score[task] = 0
                task = np.argmax(scores * mask)
                string += "{}".format(task)
                fake_score[task] = 1
                features = update_features(features, fake_score, mask)

            print(string)
Example #29
def testDataOutputFile(weights, test_x, unflatten, fileTestOutput):
    (W, b, V, c) = unflatten(weights)
    out = feedForward(W, b, V, c, test_x)
    predY = np.argmax(out, axis=1)
    #output to file
    if fileTestOutput != "":
        kaggle.kaggleize(predY, fileTestOutput)
Example #30
def maxproj(t_vec, sD1, sc=1):
    '''
    This function finds the point on a manifold (defined by a set of points sD1) with the largest projection onto
    each individual t vector given by t_vec.

    Args:
        t_vec: 2D array of shape (D+1, n_t) where D+1 is the dimension of the linear space, and n_t is the number
            of sampled vectors
        sD1: 2D array of shape (D+1, m) where m is number of manifold points
        sc: Value for center dimension (scalar, default 1)

    Returns:
        s0: 2D array of shape (D+1, n_t) containing the points with maximum projection onto corresponding t vector.
        gt: 1D array of shape (n_t) containing the value of the maximum projection of the manifold points
            onto the corresponding t vector.
    '''
    # get the dimension and number of the t vectors
    D1, n_t = t_vec.shape
    D = D1 - 1
    # Get the number of samples for the manifold to be processed
    m = sD1.shape[1]
    # For each of the t vectors, find the maximum projection onto manifold points
    # Ignore the last of the D+1 dimensions (center dimension)
    s0 = np.zeros((D1, n_t))
    gt = np.zeros(n_t)
    for i in range(n_t):
        t = t_vec[:, i]
        # Project t onto the SD vectors and find the S vector with the largest projection
        max_S = np.argmax(np.dot(t[0:D], sD1[0:D]))
        sr = sD1[0:D, max_S]
        # Append sc to this vector
        s0[:, i] = np.append(sr, [sc])
        # Compute the projection of this onto t
        gt[i] = np.dot(t, s0[:, i])
    return s0, gt
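A hedged usage sketch for maxproj (not from the source; assumes numpy as np): draw a few manifold points with a unit center dimension plus some t vectors, then check the output shapes.

D, m, n_t = 2, 50, 4
sD1 = np.vstack([np.random.randn(D, m), np.ones((1, m))])  # last row is the center dimension sc = 1
t_vec = np.random.randn(D + 1, n_t)
s0, gt = maxproj(t_vec, sD1)
print(s0.shape, gt.shape)  # expected: (3, 4) and (4,)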
Example #31
    def plot_fit(self, weights, **kwargs):
        # construct figure
        fig, axs = plt.subplots(1, 3, figsize=(9,4))

        # create subplot with 2 panels
        gs = gridspec.GridSpec(1, 3, width_ratios=[1,5,1]) 
        ax1 = plt.subplot(gs[0]); ax1.axis('off') 
        ax2 = plt.subplot(gs[1]); 
        ax3 = plt.subplot(gs[2]); ax3.axis('off')
    
        # scatter points
        xmin,xmax = self.scatter_pts(ax2,self.x,self.y)
        
        # create fit
        s = np.linspace(xmin,xmax,300)[np.newaxis,:]
        colors = ['k','magenta']
        if 'colors' in kwargs:
            colors = kwargs['colors']
        c = 0
        transformer = lambda a: a
        if 'transformer' in kwargs:
            transformer = kwargs['transformer']
        a = self.model(transformer(s),weights)

        # plot counting cost 
        t = np.argmax(a,axis = 1).flatten()
        ax2.plot(s.flatten(),t,linewidth = 4,color = 'b',zorder = 2)
Example #32
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    n = 128
    matrix = rnd.randn(n, n)
    matrix = 0.5 * (matrix + matrix.T)

    cost, egrad = create_cost_egrad(backend, matrix)
    manifold = Sphere(n)
    problem = pymanopt.Problem(manifold, cost=cost, egrad=egrad)
    if quiet:
        problem.verbosity = 0

    solver = SteepestDescent()
    estimated_dominant_eigenvector = solver.solve(problem)

    if quiet:
        return

    # Calculate the actual solution by a conventional eigenvalue decomposition.
    eigenvalues, eigenvectors = la.eig(matrix)
    dominant_eigenvector = eigenvectors[:, np.argmax(eigenvalues)]

    # Make sure both vectors have the same direction. Both are valid
    # eigenvectors, but for comparison we need to get rid of the sign
    # ambiguity.
    if (np.sign(dominant_eigenvector[0]) != np.sign(
            estimated_dominant_eigenvector[0])):
        estimated_dominant_eigenvector = -estimated_dominant_eigenvector

    # Print information about the solution.
    print("l2-norm of x: %f" % la.norm(dominant_eigenvector))
    print("l2-norm of xopt: %f" % la.norm(estimated_dominant_eigenvector))
    print("Solution found: %s" % np.allclose(
        dominant_eigenvector, estimated_dominant_eigenvector, rtol=1e-3))
    error_norm = la.norm(dominant_eigenvector - estimated_dominant_eigenvector)
    print("l2-error: %f" % error_norm)
Example #33
	def predict(self, x):
		if self.prob_func_ == "sigmoid":
			prob = (1.0 / (1.0 + np.exp(-np.dot(x, self.coef_) - self.intercept_)))[:,np.newaxis]
			prob = np.concatenate((1.0-prob, prob), axis=1)
		else: # self.prob_func_ == "softmax"
			prob = np.exp(np.dot(x, self.coef_.T) + self.intercept_)
			prob /= np.sum(prob, axis=1)[:,np.newaxis]
		return np.array([self.classes_[i] for i in np.argmax(prob, axis=1)])
Example #34
def worst():
    ans = nnm.answer(testing_images)
    for digit in range(10):
        where = np.argmax(testing_labels, 1) == digit
        idx = np.argmin(ans[where, digit])
        plt.subplot(2, 5, digit+1)
        plt_image(testing_images[where][idx].reshape([28, 28]))
        print(digit, ans[where][idx])
    plt.show()
Example #35
def shape_match_1d(y, x):
    """
    match the shape of y to that of x and return the new y object.
    """
    #assert(len(y.shape) == 1)
    sh = (np.array(x.shape) == np.array(y.shape))
    if sh.sum() != 1:
        print("There have been " + str(sh.sum()) + " matches in shape instead of exactly 1. Using first match only.", file=sys.stderr)
        sh[np.argmax(sh)+1:] = False
    
    new_sh = np.ones(sh.shape)
    new_sh[sh] = y.shape
    return np.reshape(y, new_sh)
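A hedged usage sketch (not from the source; assumes numpy as np): match a 1-D array against a 3-D array so that it broadcasts along the single axis whose length agrees.

x = np.zeros((3, 5, 2))
y = np.arange(5.0)
print(shape_match_1d(y, x).shape)  # expected: (1, 5, 1)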
Example #36
    def forward_pass(self, X):
        self.last_input = X

        out_height, out_width = pooling_shape(self.pool_shape, X.shape, self.stride)
        n_images, n_channels, _, _ = X.shape

        col = image_to_column(X, self.pool_shape, self.stride, self.padding)
        col = col.reshape(-1, self.pool_shape[0] * self.pool_shape[1])

        arg_max = np.argmax(col, axis=1)
        out = np.max(col, axis=1)
        self.arg_max = arg_max
        return out.reshape(n_images, out_height, out_width, n_channels).transpose(0, 3, 1, 2)
Example #37
    def choose_next_point(domain_min, domain_max, acquisition_function, num_tries=15, rs=npr.RandomState(0)):
        """Uses gradient-based optimization to find next query point."""
        init_points = rs.rand(num_tries, D) * (domain_max - domain_min) + domain_min

        grad_obj = value_and_grad(lambda x: -acquisition_function(x))
        def optimize_point(init_point):
            print('.', end='')
            result = minimize(grad_obj, x0=init_point, jac=True, method='L-BFGS-B',
                              options={'maxiter': 10}, bounds=list(zip(domain_min, domain_max)))
            return result.x, acquisition_function(result.x)
        optimized_points, optimized_values = list(zip(*list(map(optimize_point, init_points))))
        print()
        best_ix = np.argmax(optimized_values)
        return np.atleast_2d(optimized_points[best_ix])
Example #38
 def _decode_map(self, data):  # adapted hmmlearn
     framelogprob = self._compute_log_likelihood(data)
     logprob, fwdlattice = self._do_forward_pass(framelogprob)
     bwdlattice = self._do_backward_pass(framelogprob)
     gamma = fwdlattice + bwdlattice
     # gamma is guaranteed to be correctly normalized by logprob at
     # all frames, unless we do approximate inference using pruning.
     # So, we will normalize each frame explicitly in case we
     # pruned too aggressively.
     posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
     posteriors += np.finfo(np.float64).eps
     posteriors /= np.sum(posteriors, axis=1).reshape((-1, 1))
     state_sequence = np.argmax(posteriors, axis=1)
     map_logprob = np.max(posteriors, axis=1).sum()
     return map_logprob, state_sequence
Example #39
    def _init_params(self, data, lengths=None, params='stmpaw'):
        X = data['obs']

        if self.n_lags == 0:
            super(ARTHMM, self)._init_params(data, lengths, params)
        else:
            if 's' in params:
                super(ARTHMM, self)._init_params(data, lengths, 's')

            if 't' in params:
                super(ARTHMM, self)._init_params(data, lengths, 't')

            if 'm' in params or 'a' in params or 'p' in params:
                kmmod = cluster.KMeans(
                    n_clusters=self.n_unique,
                    random_state=self.random_state).fit(X)
                kmeans = kmmod.cluster_centers_
                ar_mod = []
                ar_alpha = []
                ar_resid = []
                if not self.shared_alpha:
                    for u in range(self.n_unique):
                        ar_mod.append(smapi.tsa.AR(X[kmmod.labels_ == \
                                                u]).fit(self.n_lags))
                        ar_alpha.append(ar_mod[u].params[1:])
                        ar_resid.append(ar_mod[u].resid)
                else:
                    # run one AR model on most part of time series
                    # that has most points assigned after clustering
                    mf = np.argmax(np.bincount(kmmod.labels_))
                    ar_mod.append(smapi.tsa.AR(X[kmmod.labels_ == \
                                              mf]).fit(self.n_lags))
                    ar_alpha.append(ar_mod[0].params[1:])
                    ar_resid.append(ar_mod[0].resid)

            if 'm' in params:
                mu_init = np.zeros((self.n_unique, self.n_features))
                for u in range(self.n_unique):
                    ar_idx = u
                    if self.shared_alpha:
                        ar_idx = 0
                    mu_init[u] = kmeans[u, 0] - np.dot(
                            np.repeat(kmeans[u, 0], self.n_lags),
                            ar_alpha[ar_idx])
                self.mu_ = np.copy(mu_init)

            if 'p' in params:
                precision_init = np.zeros((self.n_unique, self.n_features))
                for u in range(self.n_unique):
                    if not self.shared_alpha:
                        maxVar = np.max([np.var(ar_resid[i]) for i in
                                        range(self.n_unique)])
                    else:
                        maxVar = np.var(ar_resid[0])
                    precision_init[u] = 1.0 / maxVar
                self.precision_ = np.copy(precision_init)

            if 'a' in params:
                alpha_init = np.zeros((self.n_unique, self.n_lags))
                for u in range(self.n_unique):
                    ar_idx = u
                    if self.shared_alpha:
                        ar_idx = 0
                    alpha_init[u, :] = ar_alpha[ar_idx]
                self.alpha_ = alpha_init
Example #40
        datum_id = npr.randint(0, num_datums)

        # Assess expected reward across all possible actions (loop over context + action vectors)
        rewards = []
        contexts = np.zeros((num_actions, F))
        for aa in range(num_actions):
            contexts[aa,:] = np.hstack((x[datum_id, :], [aa]))
            outputs = generate_nn_output(variational_params,
                                         np.expand_dims(contexts[aa,:],0),
                                         num_weights,
                                         num_samples)
            rewards.append(np.mean(outputs))

        # Check which is greater and choose that [1,0] = eat | [0,1] do not eat
        # If argmax returns 0, then we eat, otherwise we don't
        action_chosen = np.argmax(rewards)
        reward, oracle_reward = reward_function(action_chosen, y[datum_id])

        # Calculate the cumulative regret
        cumulative_regret += oracle_reward - agent_reward

        # Store the experience of that reward as a training/data pair
        experience.append([contexts[action_chosen, :], reward])

        # Choose the action that maximizes the expected reward or go with epsilon greedy
        if len(experience) > batch_size*2:
            for ix in range(5):
                batch_data = np.zeros((batch_size, F))
                batch_labels = np.zeros((batch_size, 1))
                indices = np.random.choice(len(experience), batch_size, replace=False)
                for ix in range(batch_size):
    # Initialize variational parameters
    rs = npr.RandomState(0)
    num_samples = 2
    init_mean = rs.randn(num_weights)
    init_log_std = -5 * np.ones(num_weights)
    variational_params = np.concatenate([init_mean, init_log_std])



    for step in range(num_steps):
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        batch_data = train_data[offset:(offset + batch_size), :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        variational_params = update_nn(variational_params, batch_data, batch_labels)

        if (step % 10) == 0:
            correct = 0
            num_test = len(test_labels)
            for ix, val in enumerate(test_labels):
                outputs = generate_nn_output(variational_params,
                                            np.expand_dims(test_data[ix,:],0),
                                            num_weights,
                                            num_samples)
                predicted_class = np.argmax(np.mean(outputs, axis=0))
                actual_class = np.argmax(val)
                if actual_class == predicted_class:
                    correct += 1

            print ('Accuracy at step %d: %2.3f' % (step, float(correct)/num_test*100))
Example #42
def bayesian_optimize(func, domain_min, domain_max, num_iters=20, callback=None):

    D = len(domain_min)

    num_params, predict, log_marginal_likelihood = \
        make_gp_funs(rbf_covariance, num_cov_params=D + 1)

    model_params = init_covariance_params(num_params)

    def optimize_gp_params(init_params, X, y):
        log_hyperprior = lambda params: np.sum(norm.logpdf(params, 0., 100.))
        objective = lambda params: -log_marginal_likelihood(params, X, y) -log_hyperprior(params)
        return minimize(value_and_grad(objective), init_params, jac=True, method='CG').x

    def choose_next_point(domain_min, domain_max, acquisition_function, num_tries=15, rs=npr.RandomState(0)):
        """Uses gradient-based optimization to find next query point."""
        init_points = rs.rand(num_tries, D) * (domain_max - domain_min) + domain_min

        grad_obj = value_and_grad(lambda x: -acquisition_function(x))
        def optimize_point(init_point):
            print('.', end='')
            result = minimize(grad_obj, x0=init_point, jac=True, method='L-BFGS-B',
                              options={'maxiter': 10}, bounds=list(zip(domain_min, domain_max)))
            return result.x, acquisition_function(result.x)
        optimized_points, optimized_values = list(zip(*list(map(optimize_point, init_points))))
        print()
        best_ix = np.argmax(optimized_values)
        return np.atleast_2d(optimized_points[best_ix])


    # Start by evaluating once in the middle of the domain.
    X = np.zeros((0, D))
    y = np.zeros((0))
    X = np.concatenate((X, np.reshape((domain_max + domain_min) / 2.0, (1, D))))  # domain midpoint as a 1 x D row
    y = np.concatenate((y, np.reshape(np.array(func(X)), (1,))))

    for i in range(num_iters):
        if i > 1:
            print("Optimizing model parameters...")
            model_params = optimize_gp_params(model_params, X, y)

        print("Choosing where to look next", end='')
        def predict_func(xstar):
            mean, cov = predict(model_params, X, y, xstar)
            return mean, np.sqrt(np.diag(cov))

        def acquisition_function(xstar):
            xstar = np.atleast_2d(xstar)  # To work around a bug in scipy.minimize
            mean, std = predict_func(xstar)
            return expected_new_max(mean, std, defaultmax(y))
        next_point = choose_next_point(domain_min, domain_max, acquisition_function)

        print("Evaluating expensive function...")
        new_value = func(next_point)

        X = np.concatenate((X, next_point))
        y = np.concatenate((y, np.reshape(np.array(new_value), (1,))))

        if callback:
            callback(X, y, predict_func, acquisition_function, next_point, new_value)

    best_ix = np.argmax(y)
    return X[best_ix, :], y[best_ix]
Example #43
def one_hot_to_string(one_hot_matrix):
    return "".join([chr(np.argmax(c)) for c in one_hot_matrix])
Example #44
 def classification_err(self, W_vect, X, Y):
     logits = self.predicted_class_logprobs(W_vect, X)
     return np.mean(np.argmax(Y, axis=1) != np.argmax(logits, axis=1))
Example #45
def success_rate(output, expected_output):
    a = np.argmax(output, 1)
    b = np.argmax(expected_output, 1)
    return float(np.sum(a == b)) / len(expected_output)
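A tiny check (not from the source; assumes numpy as np): two of the three one-hot predictions below match the expected labels, so the rate is 2/3.

output = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
expected_output = np.array([[1, 0], [0, 1], [0, 1]])
print(success_rate(output, expected_output))  # 0.666...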
Example #46
 def frac_err(weights, inputs, targets):
     return np.mean(np.argmax(targets, axis=2) != np.argmax(outputs(weights, inputs), axis=2))
Example #47
    def _init_params(self, data, lengths=None, params='stmpaw'):
        X = data['obs']

        if self.n_lags == 0:
            super(ARTHMM, self)._init_params(data, lengths, params)
        else:
            if 's' in params:
                super(ARTHMM, self)._init_params(data, lengths, 's')

            if 't' in params:
                super(ARTHMM, self)._init_params(data, lengths, 't')

            if 'm' in params or 'a' in params or 'p' in params:
                kmmod = cluster.KMeans(
                    n_clusters=self.n_unique,
                    random_state=self.random_state).fit(X)
                kmeans = kmmod.cluster_centers_
                ar_mod = []
                ar_alpha = []
                ar_resid = []

                if not self.shared_alpha:
                    count = 0
                    for u in range(self.n_unique):
                        for f in range(self.n_features):
                            ar_mod.append(smapi.tsa.AR(X[kmmod.labels_ == \
                                            u,f]).fit(self.n_lags))
                            ar_alpha.append(ar_mod[count].params[1:])
                            ar_resid.append(ar_mod[count].resid)
                            count += 1
                else:
                    # run one AR model on most part of time series
                    # that has most points assigned after clustering
                    mf = np.argmax(np.bincount(kmmod.labels_))
                    for f in range(self.n_features):
                        ar_mod.append(smapi.tsa.AR(X[kmmod.labels_ == \
                                                    mf,f]).fit(self.n_lags))
                        ar_alpha.append(ar_mod[f].params[1:])
                        ar_resid.append(ar_mod[f].resid)

            if 'm' in params:
                mu_init = np.zeros((self.n_unique, self.n_features))
                for u in range(self.n_unique):
                    for f in range(self.n_features):
                        ar_idx = u
                        if self.shared_alpha:
                            ar_idx = 0
                        mu_init[u,f] = kmeans[u, f] - np.dot(
                        np.repeat(kmeans[u, f], self.n_lags), ar_alpha[ar_idx])
                self.mu_ = np.copy(mu_init)

            if 'p' in params:

                precision_init = \
                np.zeros((self.n_unique, self.n_features, self.n_features))

                for u in range(self.n_unique):
                    if self.n_features == 1:
                        precision_init[u] = 1.0/(np.var(X[kmmod.labels_ == u]))

                    else:
                        precision_init[u] = np.linalg.inv\
                        (np.cov(np.transpose(X[kmmod.labels_ == u])))

                        # Alternative: Initialization using ar_resid
                        #for f in range(self.n_features):
                        #    if not self.shared_alpha:
                        #        precision_init[u,f,f] = 1./np.var(ar_resid[count])
                        #        count += 1
                        #    else:
                        #        precision_init[u,f,f] = 1./np.var(ar_resid[f])

                self.precision_ = np.copy(precision_init)

            if 'a' in params:
                if self.shared_alpha:
                    alpha_init = np.zeros((1, self.n_lags))
                    alpha_init = ar_alpha[0].reshape((1, self.n_lags))
                else:
                    alpha_init = np.zeros((self.n_unique, self.n_lags))
                    # initialise the index outside the loop so each state picks up its own AR coefficients
                    ar_idx = 0
                    for u in range(self.n_unique):
                        alpha_init[u] = ar_alpha[ar_idx]
                        ar_idx += self.n_features
                self.alpha_ = np.copy(alpha_init)
Example #48
 def frac_err(W_vect, X, T):
     return np.mean(np.argmax(T, axis=1) != np.argmax(pred_fun(W_vect, X), axis=1))
Example #49
 def frac_err(W_vect, X, T):
     return np.mean(np.argmax(T, axis=1) != \
                    np.argmax(predictions(W_vect, X), axis=1))
Example #50
 def frac_err(weights, X, T):
     return np.mean(np.argmax(T, axis=1) != np.argmax(np.mean(predictions(weights, X), axis=0), axis=1))
Example #51
 def validating_f(weights, data, labels):
     return np.mean(np.argmax(labels, axis=1) != np.argmax(compute_inside(weights, data, False), axis=1))
Example #52
 def classifying_f(weights, data):
     return np.argmax(compute_inside(weights, data, False), axis=1)
Example #53
 def frac_err(params, X, T):
     W_vect = params[:-1]
     alpha = params[-1]
     percent_wrong = np.mean(np.argmax(T, axis=1) != np.argmax(predictions(W_vect, X, alpha), axis=1))
     return percent_wrong
Example #54
 def accuracy(inputs, weights, targets):
     lab_targs = np.argmax(targets, axis=1)
     lab_preds = np.argmax(process_one_batch(inputs, weights)[-1], axis=1)
     ac = lab_targs == lab_preds
     return np.mean(ac)
Example #55
def accuracy(params, inputs, targets):
    target_class    = np.argmax(targets, axis=1)
    predicted_class = np.argmax(neural_net_predict(params, inputs), axis=1)
    return np.mean(predicted_class == target_class)
Example #56
    # read in image and corresponding source
    print "read in images and sources"
    run = 125
    camcol = 1
    field = 17
    tsrcs = sdss.get_tractor_sources_dr9(run, camcol, field)
    imgfits = make_fits_images(run, camcol, field)

    # list of images, list of celeste sources
    imgs = [imgfits[b] for b in BANDS]
    srcs = [tractor_src_to_celestepy_src(s) for s in tsrcs]

    src_types = np.array([src.a for src in srcs])
    rfluxes = np.array([src.fluxes[2] for src in srcs])
    rfluxes[src_types == 0] = -1
    brightest_i = np.argmax(rfluxes)

    # dim 10
    # bright 46
    # medium 1
    def star_arg_squared_loss_single_im(fluxes, galaxy_src, image):
        star = SrcParams(src.u, a=0, fluxes=np.exp(fluxes))
        return squared_loss_single_im(galaxy_src, star, image)

    def star_arg_squared_loss(fluxes, galaxy_src, images):
        star = SrcParams(src.u, a=0, fluxes=np.exp(fluxes))
        return squared_loss(galaxy_src, star, images)


    # do gradient descent
    for src in [srcs[10]]:
Example #57
 def frac_err(W_vect, X, T):
     percent_wrong = np.mean(np.argmax(T, axis=1) != np.argmax(predictions(W_vect, X), axis=1))
     return percent_wrong