Example #1
def get_tractor_fits_values(T, cat, pat):
    typearray = np.array([fits_typemap[type(src)] for src in cat])
    # If there are no "COMP" sources, the type will be 'S3' rather than 'S4'...
    typearray = typearray.astype("S4")
    T.set(pat % "type", typearray)

    T.set(pat % "ra", np.array([src is not None and src.getPosition().ra for src in cat]))
    T.set(pat % "dec", np.array([src is not None and src.getPosition().dec for src in cat]))

    shapeExp = np.zeros((len(T), 3))
    shapeDev = np.zeros((len(T), 3))
    fracDev = np.zeros(len(T))

    for i, src in enumerate(cat):
        if isinstance(src, ExpGalaxy):
            shapeExp[i, :] = src.shape.getAllParams()
        elif isinstance(src, DevGalaxy):
            shapeDev[i, :] = src.shape.getAllParams()
            fracDev[i] = 1.0
        elif isinstance(src, FixedCompositeGalaxy):
            shapeExp[i, :] = src.shapeExp.getAllParams()
            shapeDev[i, :] = src.shapeDev.getAllParams()
            fracDev[i] = src.fracDev.getValue()

    T.set(pat % "shapeExp", shapeExp.astype(np.float32))
    T.set(pat % "shapeDev", shapeDev.astype(np.float32))
    T.set(pat % "fracDev", fracDev.astype(np.float32))
    return
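A note on the astype("S4") call above: numpy infers a string dtype only as wide as the longest element, so a catalog with no "COMP" entries would produce 3-character strings and later "COMP" writes would be truncated. A minimal standalone demonstration (plain numpy, no tractor objects):

import numpy as np

types = np.array(["PSF", "EXP", "DEV"])  # longest entry is 3 characters
print(types.dtype)                       # <U3 -- too narrow for "COMP"
print(types.astype("S4").dtype)          # |S4 -- fixed-width 4-byte strings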
Example #2
def random_test():
    rand_periods = np.zeros(1000)
    periods = np.zeros(1000)
    for i in range(1000):
        rand_periods[i] = np.random.uniform(low=0.0, high=2.0)
        periods[i] = 10 ** rand_periods[i]

    true_periods = np.zeros(1000)
    for i in range(1000):
        data = np.genfromtxt("/Users/angusr/angusr/ACF/star_spot_sim/tests/sim_period%s.txt" % (i + 1))
        true_periods[i] = data

    p.close(4)
    p.figure(4)
    p.subplot(3, 1, 1)
    p.plot(rand_periods, "k.")
    p.subplot(3, 1, 2)
    p.plot(periods, "k.")
    p.subplot(3, 1, 3)
    p.plot(np.log10(true_periods), "k.")

    """ Plotting as close to original periods as I can"""
    p.close(10)
    p.figure(10)
    p.subplot(1, 2, 1)
    orig_periods = np.zeros(100)
    for i in range(100):
        data = np.genfromtxt("/Users/angusr/angusr/ACF/star_spot_sim/sim_period%s.txt" % (i + 1)).T
        p.axhline(np.log10(data[4]), color="k")
    p.subplot(1, 2, 2)
    for i in range(100):
        data = np.genfromtxt("/Users/angusr/angusr/ACF/star_spot_sim/grid/%sparams.txt" % (i + 1)).T
        p.axhline(np.log10(data[7]), color="k")
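The first loop above draws exponents uniformly on [0, 2] and raises 10 to them, i.e. it samples periods log-uniformly between 1 and 100 days. Both 1000-iteration loops collapse to two vectorized lines:

import numpy as np

rand_periods = np.random.uniform(low=0.0, high=2.0, size=1000)
periods = 10 ** rand_periods  # log-uniform on [1, 100]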
Example #3
    def GetStoichiometry(self):
        """ 
            Returns:
                cids         - a list of pseudoisomer IDs
                obs_ids      - a list of observation IDs
                S            - the observation stoichiometric matrix
                gibbs_values - a row vector of the dG0s
                anchored     - a row vector indicating which obs. are anchored 
        """
        n = len(self.observations)  # number of observations

        cids = set()
        for obs in self.observations:
            cids.update(obs.sparse.keys())
        cids = sorted(cids)

        # create the stoichiometric matrix S (rows=pseudoisomers, cols=observations)
        S = np.matrix(np.zeros((len(cids), n)))
        gibbs_values = np.matrix(np.zeros((1, n)))
        anchored = np.matrix(np.zeros((1, n)))
        obs_ids = []
        for i_obs, obs in enumerate(self.observations):
            obs_ids.append(obs.obs_id)
            for cid, coeff in obs.sparse.items():
                i_cid = cids.index(cid)
                S[i_cid, i_obs] = coeff
            gibbs_values[0, i_obs] = obs.dG0
            if obs.anchored:
                anchored[0, i_obs] = 1

        return cids, obs_ids, S, gibbs_values, anchored
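A typical downstream use of these outputs is to regress per-pseudoisomer formation energies from the observed reaction energies, since each observation's dG0 is the stoichiometry-weighted sum of its compounds' values. A hedged sketch with a toy 3-compound, 2-observation matrix (the least-squares step is illustrative, not part of this class):

import numpy as np

# S is (n_compounds x n_observations); gibbs_values is (1 x n_observations)
S = np.array([[-1.0, 0.0], [1.0, -1.0], [0.0, 1.0]])
gibbs_values = np.array([[-5.0, 2.0]])
dG0_f, _, _, _ = np.linalg.lstsq(S.T, gibbs_values.ravel(), rcond=None)
print(dG0_f)  # one formation energy per compound, up to an additive gauge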
Example #4
def scatterplot(x_prop_name, x_property, y_prop_name, y_property, x_err=None, y_err=None, name="scatter_plot"):
    """Normal Scatter Plot"""

    if x_err is None:
        x_err = list(numpy.zeros(len(x_property)))
    if y_err is None:
        y_err = list(numpy.zeros(len(y_property)))

    matplotlib.pyplot.errorbar(
        numpy.log10(x_property),
        numpy.log10(y_property),
        yerr=y_err,
        xerr=x_err,
        marker="o",
        linestyle="None",
        mfc="blue",
        mec="green",
        ecolor="blue",
    )

    matplotlib.pyplot.xlabel(x_prop_name)
    matplotlib.pyplot.ylabel(y_prop_name)
    matplotlib.pyplot.legend()

    savepath = storepath + name + ".png"
    print "lightcurve saved to " + savepath
    matplotlib.pyplot.savefig(savepath)
    matplotlib.pyplot.close()
Example #5
def perform_scaling(data, electrode_indices):
    """This function, suprisingly, performs scaling on a given set of data.

    The data is returned as a dictionary of 8 differently scaled data sets. The
    8 data sets represents all combinations of the following scaling
    methodologies:
        'Vector' of 'MinMax' scaling
        'Within'- or 'Across'-subject scaling
        'All' or 'Few' electrodes included in scaling

    The data can be accessed in the following way:
    
    data_scaled = perform_scaling(data, electrode_indices)
    print(data_scaled['Vector']['Across']['All']
    
    """
    # TODO: Should the scaled for selected electrodes data retain all the
    # unscaled values? I.e. for compatibility with other scripts - i.e. the
    # electrode numbers will be all screwed up...
    data_scaled = {
        "Vector": {"Across": {"All": None, "Few": None}, "Within": {"All": None, "Few": None}},
        "MaxMin": {"Across": {"All": None, "Few": None}, "Within": {"All": None, "Few": None}},
    }

    mean_all = mean(data, 0)
    mean_few = mean(data[:, electrode_indices], 0)

    data_scaled["Vector"]["Across"]["All"] = copy(data) / sqrt(vdot(mean_all, mean_all))
    data_scaled["Vector"]["Across"]["Few"] = copy(data) / sqrt(vdot(mean_few, mean_few))

    min_point = min(mean_all)
    max_point = max(mean_all)
    diff = max_point - min_point
    data_scaled["MaxMin"]["Across"]["All"] = (copy(data) - min_point) / diff
    min_point = min(mean_few)
    max_point = max(mean_few)
    diff = max_point - min_point
    data_scaled["MaxMin"]["Across"]["Few"] = (copy(data) - min_point) / diff

    data_scaled["Vector"]["Within"]["All"] = zeros(data.shape)
    data_scaled["Vector"]["Within"]["Few"] = zeros(data.shape)
    data_scaled["MaxMin"]["Within"]["All"] = zeros(data.shape)
    data_scaled["MaxMin"]["Within"]["Few"] = zeros(data.shape)
    for i in range(data.shape[0]):
        data_scaled["Vector"]["Within"]["All"][i, :] = copy(data[i, :]) / sqrt(vdot(data[i, :], data[i, :]))
        data_scaled["Vector"]["Within"]["Few"][i, :] = copy(data[i, :]) / sqrt(
            vdot(data[i, electrode_indices], data[i, electrode_indices])
        )

        min_point = min(data[i, :])
        max_point = max(data[i, :])
        diff = max_point - min_point
        data_scaled["MaxMin"]["Within"]["All"][i, :] = (copy(data[i, :]) - min_point) / diff

        min_point = min(data[i, electrode_indices])
        max_point = max(data[i, electrode_indices])
        diff = max_point - min_point
        data_scaled["MaxMin"]["Within"]["Few"][i, :] = (copy(data[i, :]) - min_point) / diff

    return data_scaled
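A minimal usage sketch; the shapes are invented, and the function body assumes numpy names (mean, sqrt, vdot, copy, zeros) were star-imported in its module:

import numpy as np

data = np.random.rand(5, 64)      # e.g. 5 subjects x 64 electrodes
electrode_indices = [0, 1, 2, 3]  # the 'Few' subset

data_scaled = perform_scaling(data, electrode_indices)
print(data_scaled["Vector"]["Across"]["All"].shape)  # (5, 64)
print(data_scaled["MaxMin"]["Within"]["Few"].shape)  # (5, 64)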
Example #6
    def __init__(self, input, n_in, n_out):
        """ Initialize the parameters of the logistic regression

        :type input: theano.tensor.TensorType
        :param input: symbolic variable that describes the input of the
                      architecture (one minibatch)

        :type n_in: int
        :param n_in: number of input units, the dimension of the space in
                     which the datapoints lie

        :type n_out: int
        :param n_out: number of output units, the dimension of the space in
                      which the labels lie

        """

        # initialize with 0 the weights W as a matrix of shape (n_in, n_out)
        self.W = theano.shared(value=numpy.zeros((n_in, n_out),
                                                 dtype=theano.config.floatX),
                               name='W', borrow=True)
        # initialize the biases b as a vector of n_out 0s
        self.b = theano.shared(value=numpy.zeros((n_out,),
                                                 dtype=theano.config.floatX),
                               name='b', borrow=True)

        # compute vector of class-membership probabilities in symbolic form
        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)

        # compute prediction as class whose probability is maximal in
        # symbolic form
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)

        # parameters of the model
        self.params = [self.W, self.b]
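The two symbolic expressions at the end are plain softmax regression; in numpy the same forward pass looks like this (a sketch for intuition, not Theano code):

import numpy as np

def softmax(z):
    e = np.exp(z - z.max(axis=1, keepdims=True))  # shift by max for stability
    return e / e.sum(axis=1, keepdims=True)

x = np.random.rand(4, 10)            # minibatch of 4 examples, n_in = 10
W = np.zeros((10, 3))                # n_out = 3, zero-initialized as above
b = np.zeros(3)

p_y_given_x = softmax(x.dot(W) + b)  # class-membership probabilities
y_pred = p_y_given_x.argmax(axis=1)  # most probable class per example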
Example #7
def load(handle):
    """load(handle) -> MarkovModel()"""
    # Load the states.
    line = _readline_and_check_start(handle, "STATES:")
    states = line.split()[1:]

    # Load the alphabet.
    line = _readline_and_check_start(handle, "ALPHABET:")
    alphabet = line.split()[1:]

    mm = MarkovModel(states, alphabet)
    N, M = len(states), len(alphabet)

    # Load the initial probabilities.
    mm.p_initial = numpy.zeros(N)
    line = _readline_and_check_start(handle, "INITIAL:")
    for i in range(len(states)):
        line = _readline_and_check_start(handle, "  %s:" % states[i])
        mm.p_initial[i] = float(line.split()[-1])

    # Load the transition.
    mm.p_transition = numpy.zeros((N, N))
    line = _readline_and_check_start(handle, "TRANSITION:")
    for i in range(len(states)):
        line = _readline_and_check_start(handle, "  %s:" % states[i])
        mm.p_transition[i, :] = [float(v) for v in line.split()[1:]]

    # Load the emission.
    mm.p_emission = numpy.zeros((N, M))
    line = _readline_and_check_start(handle, "EMISSION:")
    for i in range(len(states)):
        line = _readline_and_check_start(handle, "  %s:" % states[i])
        mm.p_emission[i, :] = [float(v) for v in line.split()[1:]]

    return mm
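Working backwards from the parser, the on-disk format is a sectioned text file. A hedged reconstruction for a 2-state, 2-symbol model (the exact whitespace is an assumption):

from io import StringIO

text = """STATES: rainy sunny
ALPHABET: walk shop
INITIAL:
  rainy: 0.6
  sunny: 0.4
TRANSITION:
  rainy: 0.7 0.3
  sunny: 0.4 0.6
EMISSION:
  rainy: 0.1 0.9
  sunny: 0.6 0.4
"""
# mm = load(StringIO(text))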
Example #8
def test__cooccurence():
    cooccurence = mahotas._texture.cooccurence
    f = np.array([[0, 1, 1, 1], [0, 0, 1, 1], [2, 2, 2, 2]])
    Bc = np.zeros((3, 3), f.dtype)
    Bc[1, 2] = 1
    res = np.zeros((5, 5), np.int32)
    cooccurence(f, res, Bc, 0)
    assert res[0, 0] == 1
    assert res[0, 1] == 2
    assert res[1, 0] == 0
    assert res[1, 1] == 3
    assert res[2, 2] == 3
    assert not np.any(res[2, :2])
    assert not np.any(res[:2, 2])
    res[:3, :3] = 0
    assert not np.any(res)

    res = np.zeros((5, 5), np.int32)
    Bc = np.zeros((3, 3), f.dtype)
    Bc[2, 2] = 1
    cooccurence(f, res, Bc, 0)
    assert res[0, 0] == 1
    assert res[0, 1] == 0
    assert res[0, 2] == 2
    assert res[1, 0] == 0
    assert res[1, 1] == 2
    assert res[1, 2] == 1
    res[:3, :3] = 0
    assert not np.any(res)
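For reference, the quantity under test can be computed directly: count how often grey level i is followed by grey level j at the offset marked in Bc (Bc[1, 2] = 1 marks offset (0, 1), the right-hand neighbour, relative to the centre (1, 1)). A pure-numpy reconstruction of those semantics:

import numpy as np

def cooccurrence_ref(f, dy, dx, ngrey):
    # count pairs (f[y, x], f[y + dy, x + dx]) over all in-bounds pixels
    res = np.zeros((ngrey, ngrey), np.int32)
    h, w = f.shape
    for y in range(h):
        for x in range(w):
            y2, x2 = y + dy, x + dx
            if 0 <= y2 < h and 0 <= x2 < w:
                res[f[y, x], f[y2, x2]] += 1
    return res

f = np.array([[0, 1, 1, 1], [0, 0, 1, 1], [2, 2, 2, 2]])
print(cooccurrence_ref(f, 0, 1, 5))  # matches the first set of asserts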
Example #9
    def build_kernels(self):
        """ Build the synaptic connectivity matrices """
        n = self.n
        # Compute all the possible distances
        dist = [
            self.build_distances(n, 0.917, 0.0, 1.0),
            self.build_distances(n, 0.083, 0.0, 1.0),
            self.build_distances(n, 0.912, 0.83, 1.0),
        ]

        # Create a temporary vector containing gaussians
        g = np.empty((len(self.K), n, n))
        for j in range(len(self.K)):
            for i in range(n):
                # g[j, i] = self.K[j] * self.gaussian(dist[i], self.S[j])
                g[j, i] = self.K[j] * self.g(dist[j][i], self.S[j])
            g[j, self.m : self.k] = 0.0

        # GPe to STN connections
        W12 = np.zeros((n, n))
        W12[: self.m, self.k :] = g[0, self.k :, self.k :]

        # STN to GPe connections
        W21 = np.zeros((n, n))
        W21[self.k :, : self.m] = g[1, : self.m, : self.m]

        # GPe to GPe connections
        W22 = np.zeros((n, n))
        W22[self.k :, self.k :] = g[2, self.k :, self.k :]
        np.fill_diagonal(W22, 0.0)

        return W12, W21, W22, dist
Example #10
def model_complexity(X_train, y_train, X_test, y_test):
    """Calculate the performance of the model as model complexity increases."""

    print "Model Complexity: "

    # We will vary the depth of the decision tree from 1 to 24
    max_depth = np.arange(1, 25)
    train_err = np.zeros(len(max_depth))
    test_err = np.zeros(len(max_depth))

    for i, d in enumerate(max_depth):
        # Setup a Decision Tree Regressor so that it learns a tree with depth d
        regressor = DecisionTreeRegressor(max_depth=d)

        # Fit the learner to the training data
        regressor.fit(X_train, y_train)

        # Find the performance on the training set
        train_err[i] = performance_metric(y_train, regressor.predict(X_train))

        # Find the performance on the testing set
        test_err[i] = performance_metric(y_test, regressor.predict(X_test))

    # Plot the model complexity graph
    model_complexity_graph(max_depth, train_err, test_err)
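The snippet assumes two helpers from the surrounding project, performance_metric and model_complexity_graph. Minimal stand-ins so the function runs end to end (mean squared error and a two-line plot are assumptions, not the project's actual definitions):

import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error

def performance_metric(y_true, y_pred):
    return mean_squared_error(y_true, y_pred)

def model_complexity_graph(max_depth, train_err, test_err):
    plt.plot(max_depth, train_err, label="training error")
    plt.plot(max_depth, test_err, label="testing error")
    plt.xlabel("max depth")
    plt.ylabel("error")
    plt.legend()
    plt.show()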
Example #11
    def do_not_use_wignercell(self, origo=numpy.zeros(3), eps=0.000000001):
        reciprocal_vectors = numpy.linalg.inv(self.lattice_vectors)
        # print(reciprocal_vectors)
        # print(self.lattice_vectors)
        ordered = []
        ac = 0
        for atom in self.atoms:
            rho = numpy.zeros(3)
            r = atom.position()
            # TODO: refactor
            for i in range(0, 3):
                for j in range(0, 3):
                    rho[i] += r[j] * reciprocal_vectors[j][i] / self.lattice_constant
                # end for
            # end for
            wigner = True
            for i in range(0, 3):
                if abs(rho[i]) < 0.500000000 - eps:
                    wigner &= True
                else:
                    wigner &= False
                # end if
            # end for
            # print wigner,atom.no(),atom.symbol(),rho

            if wigner:
                print(atom.no(), atom.symbol(), rho)
                ordered.append(atom)
                ac += 1
            # end if
        # end for
        print "Atoms: ", ac

        self.atoms = ordered
        self.gen_species()
Example #12
def train_model(graph, seq_params):
    nb_iter = 1
    nb_epochs = 10000
    len_seq = 20
    num_seqs = 100000

    X_train = np.zeros((num_seqs, len_seq, 60), dtype=bool)
    y_train = np.zeros((num_seqs, 60), dtype=bool)

    from monitoring import LossHistory

    history = LossHistory()
    checkpointer = ModelCheckpoint(filepath=save_model_path, verbose=1, save_best_only=True)

    for e in range(nb_iter):
        print("-" * 40)
        print("Iteration", e)
        print("-" * 40)

        print("Generating training data...")
        get_random_batch(X_train, y_train, seq_params)
        print("Fitting data...")
        earlystopper = EarlyStopping(monitor="val_loss", patience=25, verbose=2)
        graph.fit(
            {"input": X_train, "out1": y_train[:, :60]},
            validation_split=0.3,
            batch_size=128,
            nb_epoch=nb_epochs,
            callbacks=[checkpointer, earlystopper, history],
        )
Example #13
 def onkeydown(self, event):
     if event.key == "z":
         self.sign = -self.sign
     elif event.key == "m":
         self.cur_model = (self.cur_model + 1) % len(models)
         self.redraw()
     elif event.key == "n":
         self.cur_model = (self.cur_model - 1) % len(models)
         self.redraw()
     elif event.key == "c":
         self.x = np.zeros((0, 2))
         self.y = np.zeros(0)
         self.redraw()
     elif event.key == "x" and len(self.y) > 0:
         self.y = self.y[:-1]
         self.x = self.x[:-1]
         self.redraw()
     elif event.key == "r":
         self.redraw()
     elif event.key == "d":
         self.plot_prob = not self.plot_prob
         self.redraw()
     elif event.key == "h":
         self.ax.text(0.1, 0.1, self.usage)
         print(self.usage)
         self.fig.canvas.draw()
     elif event.key == "q":
         sys.exit(0)
Example #14
    def forward_iter(self, data):
        data = np.array(data, dtype=np.float32)
        assert np.all(data.shape[1:] == self.shape[1:])
        n_row = self.config.input_shape[0]
        net_input = np.zeros(self.shape, dtype=np.float32)

        s = 0
        e = n_row
        if e > data.shape[0]:
            e = data.shape[0]

        while s < data.shape[0]:
            n_data = e - s
            assert n_data <= n_row
            net_input[:n_data, :, :, :] = data[s:e, :, :, :]
            out = self.net.forward(blobs=[self.net.outputs[0]], **{self.net.inputs[0]: net_input})[self.net.outputs[0]]

            assert n_row == len(out)
            for i in range(n_data):
                yield (NetOutput(out[i].flatten()))

            s += n_row
            e += n_row
            if e > data.shape[0]:
                e = data.shape[0]

            net_input = np.zeros(self.shape, dtype=np.float32)
Example #15
    def __init__(self, preRange=7, Data=None):
        """
        Data structures:
            Income: dictionary --> the time and amount
            Transactions: predefined row-based data; keep the number of
                columns at 4: Grocery | Entertain | Other | Schedule

        Prediction strategy:
            1. The prediction class is called when the user wants a prediction.
            2. Optionally, the prediction function is also called when new
               data is added.

        INPUT DATA FORMAT:
            X --> number of days x number of categories (a formulation
                  function can be written for this)
            preRange --> how many days ahead the user wants to predict

        (optional): we should probably also set up confidence settings.
        """
        self.categoryClass = {"Grocery": 0, "Entertain": 1, "Other": 2, "Schedule": 3}
        self.mean_X = None
        self.std_X = None
        if Data is None:
            self.X = self.acquireData(start=datetime(2010, 1, 1), end=datetime(2011, 12, 31))
        else:
            self.X = Data
        self.preRange = preRange
        self.predict = np.zeros((self.preRange, self.X.shape[1]))
        self.predictAll = np.zeros((self.preRange, 1))
Example #16
    def apply_mds(self):
        print("Applying MDS...")
        current_time = time.perf_counter()  # time.clock() was removed in Python 3.8
        A = np.zeros((self.N, self.N), dtype=np.float32)
        for i in range(self.N):
            for j in range(self.N):
                A[i][j] = -self.graph[i][j] * self.graph[i][j] / 2

        a = np.zeros(self.N, dtype=np.float32)
        for i in range(self.N):
            a[i] = 0.0
            for j in range(self.N):
                a[i] = a[i] + A[i][j] / self.N

        b = 0.0
        for i in range(self.N):
            for j in range(self.N):
                b = b + A[i][j] / (self.N * self.N)

        B = np.zeros((self.N, self.N), dtype=np.float32)
        for i in range(self.N):
            for j in range(self.N):
                B[i][j] = A[i][j] - a[i] - a[j] + b
        eigen_time_start = time.perf_counter()
        eigenvals, eigenvecs = np.linalg.eig(B)
        eigenvecs = eigenvecs.real
        eigenvals = eigenvals.real
        eigen_time_total = time.perf_counter() - eigen_time_start
        self.outdata = np.zeros((self.N, self.O), dtype=np.float32)
        for i in range(self.N):
            for j in range(self.O):
                self.outdata[i][j] = eigenvecs[i][j] * sqrt(eigenvals[j])

        print "Time to apply MDS: %6.2f s" % (time.clock() - current_time)
        print "Time to compute eigensystem: %6.2f s" % eigen_time_total
Example #17
    def complete_with(self, foods):
        # This function:
        #   - adjusts the target DI based on foods already added
        #   - builds food_mat from foods
        #   - normalizes and weights each row by half the upper-lower range
        #   - solves a non-negative least-squares problem
        #   - adds only the foods with nonzero amounts in the result
        n, m = len(self.target_di), len(foods)
        food_mat = np.zeros([n, m])
        di_amounts = np.zeros([n])
        di_so_far = self.get_di()
        norm_terms = np.ones([n])
        missing = dict()
        for i in range(n):
            di = self.target_di[i]
            norm_terms[i] = (di.upper - di.lower) / 2.0
            di_amounts[i] = (di.lower + norm_terms[i] - di_so_far[i].amount) / norm_terms[i]

            for j in range(m):
                food = foods[j]
                try:
                    food_mat[i][j] = food.nut_amounts[di.nut] / norm_terms[i]
                except KeyError:
                    pass
                    # print "Warning: No nutrient data for " + str(nut) + " in " + str(food)
                    # missing[nut] = (missing.get(nut) or 0) + 1
        amounts, error = op.nnls(food_mat, di_amounts)
        for j in range(m):
            if amounts[j] > 0:
                self.add_food(foods[j], amounts[j] * 100.0)
        return error
Example #18
 def apply_mds_parallel2(self):
     print("Applying parallel MDS via SMACOF...")
     current_time = time.perf_counter()
     graph_d = gpu.to_gpu(np.float32(self.graph))
     row_sum_d = gpu.to_gpu(np.float32(np.zeros(self.N)))
     score_current_d = gpu.to_gpu(np.float32(np.random.uniform(0, 10, size=self.N)))
     score_next_d = gpu.to_gpu(np.float32(np.zeros(self.N)))
     sigma_d = gpu.to_gpu(np.float32(np.zeros(self.N)))
     delta_d = gpu.to_gpu(np.float32(np.zeros(self.N)))
     mds2_kernel = cuda_compile(_kernel_source, "mds2_kernel")
     stress = 1
     while stress > 0.001:
         mds2_kernel(
             graph_d,
             row_sum_d,
             score_current_d,
             score_next_d,
             sigma_d,
             delta_d,
             np.int32(self.N),
             block=(1024, 1, 1),
             grid=(int(self.N / 1024 + 1), int(1)),
         )
         score_current_d = score_next_d
         score_next_d = gpu.to_gpu(np.float32(np.zeros(self.N)))
         stress = gpu.sum(sigma_d).get() / gpu.sum(delta_d).get()
     self.outdata = score_current_d.get()
     print "Time to apply parallel MDS: %6.2f s" % (time.clock() - current_time)
Example #19
def I210_parametric_study(alphas):
    # load the network and its properties
    g_r, d, node, feat = load_I210_modified()
    # modify the costs on non routed network
    g_nr, small_capacity = multiply_cognitive_cost(g_r, feat, 3000.0, 100.0)
    # scale the demand down by 4000 to keep the computation tractable
    d[:, 2] = d[:, 2] / 4000.0

    for alpha in alphas:
        if alpha == 0.0:
            print "non-routed = 1.0, routed = 0.0"
            f_nr = solver_3(g_nr, d, max_iter=1000, stop=1e-3)
            fs = np.zeros((f_nr.shape[0], 2))
            fs[:, 0] = f_nr
        elif alpha == 1.0:
            print "non-routed = 0.0, routed = 1.0"
            f_r = solver_3(g_r, d, max_iter=1000, stop=1e-3)
            fs = np.zeros((f_r.shape[0], 2))
            fs[:, 1] = f_r
        else:
            print "non-routed = {}, routed = {}".format(1 - alpha, alpha)
            d_nr, d_r = heterogeneous_demand(d, alpha)
            fs = gauss_seidel(
                [g_nr, g_r], [d_nr, d_r], solver_3, max_iter=1000, stop=1e-3, stop_cycle=1e-3, q=50, past=20
            )
        np.savetxt("data/I210_modified/test_{}.csv".format(int(alpha * 100)), fs, delimiter=",", header="f_nr,f_r")
Example #20
    def test_hidden(self):
        shape = (2,)
        a = self.array(shape, intent.hide, None)
        assert_(a.arr.shape == shape)
        assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype)))

        shape = (2, 3)
        a = self.array(shape, intent.hide, None)
        assert_(a.arr.shape == shape)
        assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype)))
        assert_(a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"])

        shape = (2, 3)
        a = self.array(shape, intent.c.hide, None)
        assert_(a.arr.shape == shape)
        assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype)))
        assert_(not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"])

        shape = (-1, 3)
        try:
            a = self.array(shape, intent.hide, None)
        except ValueError as msg:
            if not str(msg).startswith("failed to create intent(cache|hide)|optional array"):
                raise
        else:
            raise SystemError("intent(hide) should have failed on undefined dimensions")
Example #21
    def ResMatS(self, H, K, L, W, EXP):
        # [len,H,K,L,W,EXP]=CleanArgs(H,K,L,W,EXP);
        x = self.x
        y = self.y
        z = self.z
        Q = self.modvec(H, K, L, "latticestar")
        uq = N.zeros((3, self.npts), "d")
        uq[0, :] = H / Q  # unit vector along Q
        uq[1, :] = K / Q
        uq[2, :] = L / Q
        xq = self.scalar(x[0, :], x[1, :], x[2, :], uq[0, :], uq[1, :], uq[2, :], "latticestar")
        yq = self.scalar(y[0, :], y[1, :], y[2, :], uq[0, :], uq[1, :], uq[2, :], "latticestar")
        zq = 0
        # scattering vector assumed to be in the (self.orient1, self.orient2) plane
        tmat = N.zeros((4, 4, self.npts))  # coordinate transformation matrix
        tmat[3, 3, :] = 1
        tmat[2, 2, :] = 1
        tmat[0, 0, :] = xq
        tmat[0, 1, :] = yq
        tmat[1, 1, :] = xq
        tmat[1, 0, :] = -yq

        RMS = N.zeros((4, 4, self.npts))
        rot = N.zeros((3, 3))
        EXProt = EXP

        # Sample shape matrix in the coordinate system defined by the scattering vector
        for i in range(self.npts):
            sample = EXP[i]["sample"]
            if "shape" in sample:
                rot[0, 0] = tmat[0, 0, i]
                rot[1, 0] = tmat[1, 0, i]
                rot[0, 1] = tmat[0, 1, i]
                rot[1, 1] = tmat[1, 1, i]
                rot[2, 2] = tmat[2, 2, i]
                EXProt[i]["sample"]["shape"] = N.dot(rot, N.dot(sample["shape"], rot.T))

        R0, RM = self.ResMat(Q, W, EXProt)

        for i in range(self.npts):
            RMS[:, :, i] = N.dot((tmat[:, :, i]).transpose(), N.dot(RM[:, :, i], tmat[:, :, i]))

        mul = N.zeros((4, 4))
        e = N.eye(4, 4)
        for i in range(self.npts):
            if "Smooth" in EXP[i]:
                if "X" in (EXP[i]["Smooth"]):
                    mul[0, 0] = 1.0 / (EXP[i]["Smooth"]["X"] ** 2 / 8 / N.log(2))
                    mul[1, 1] = 1.0 / (EXP[i]["Smooth"]["Y"] ** 2 / 8 / N.log(2))
                    mul[2, 2] = 1.0 / (EXP[i]["Smooth"]["E"] ** 2 / 8 / N.log(2))
                    mul[3, 3] = 1.0 / (EXP[i]["Smooth"]["Z"] ** 2 / 8 / N.log(2))
                    R0[i] = (
                        R0[i]
                        / N.sqrt(N.linalg.det(e / RMS[:, :, i]))
                        * N.sqrt(N.linalg.det(e / mul + e / RMS[:, :, i]))
                    )
                    RMS[:, :, i] = e / (e / mul + e / RMS[:, :, i])
        return R0, RMS
Example #22
    def apply(self, dataset, can_fit=False):
        X = dataset.get_topological_view()
        d = len(X.shape) - 2
        assert d in [2, 3]
        assert X.dtype == "float32" or X.dtype == "float64"
        if d == 2:
            X = X.reshape([X.shape[0], X.shape[1], X.shape[2], 1, X.shape[3]])
        kernel_size = 1
        kernel_shape = [X.shape[-1]]
        for factor in self.sampling_factor:
            kernel_size *= factor
            kernel_shape.append(factor)
        if d == 2:
            kernel_shape.append(1)
        kernel_shape.append(X.shape[-1])
        kernel_value = 1.0 / float(kernel_size)
        kernel = numpy.zeros(kernel_shape, dtype=X.dtype)
        for i in range(X.shape[-1]):
            kernel[i, :, :, :, i] = kernel_value
        from theano.tensor.nnet.Conv3D import conv3D

        X_var = tensor.TensorType(broadcastable=[s == 1 for s in X.shape], dtype=X.dtype)()
        downsampled = conv3D(X_var, kernel, numpy.zeros(X.shape[-1], X.dtype), kernel_shape[1:-1])
        f = function([X_var], downsampled)
        X = f(X)
        if d == 2:
            X = X.reshape([X.shape[0], X.shape[1], X.shape[2], X.shape[4]])
        dataset.set_topological_view(X)
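The convolution built here is just block averaging: each output element is the mean of a sampling_factor-sized block of the input. For a 2D factor (fy, fx) the same downsampling can be written as a reshape-and-mean, which makes a handy sanity check (a numpy sketch, not pylearn2 code):

import numpy as np

def block_mean(X, fy, fx):
    # average-pool a (batch, rows, cols, channels) array by (fy, fx)
    n, h, w, c = X.shape
    assert h % fy == 0 and w % fx == 0
    return X.reshape(n, h // fy, fy, w // fx, fx, c).mean(axis=(2, 4))

X = np.random.rand(2, 8, 8, 3).astype("float32")
print(block_mean(X, 2, 2).shape)  # (2, 4, 4, 3)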
Example #23
    def grad_EVzxVzxT_by_Z(self, EVzxVzxT_list_this, Z, A, B, p, r):

        P = Z.shape[0]
        R = Z.shape[1]
        N = A.shape[0]

        ainv = 1 / (self.length_scale * self.length_scale)
        siginv = 1 / (B[0, 0] * B[0, 0])

        dZthis = np.zeros([1, R])

        dZthis[0, r] = 1

        res1 = -0.5 * (dZthis.dot(Z[p, :]) + Z[p, :].dot(dZthis.T)) * (ainv - ainv * (1 / (siginv + 2 * ainv)) * ainv)

        res2 = np.tile(dZthis.dot(A.T) * (ainv * (1 / (siginv + 2 * ainv)) * siginv), [P, 1])

        res3 = np.tile(dZthis.dot(Z.T) * (ainv * (1 / (siginv + 2 * ainv)) * ainv), [N, 1])

        dZ = np.zeros([N, P, P])

        dZ[:, p, :] += np.float64(res1) + res2.T + res3
        dZ[:, :, p] += np.float64(res1) + res2.T + res3

        # set the diagonal
        # dZ[:,p,p] = dZ[:,p,p]/2.

        res = np.sum(EVzxVzxT_list_this * dZ, axis=0)

        return res
Example #24
    def SplitData(self, category):
        raw = self.X[:, self.categoryClass[category]]
        if self.X.shape[0] >= self.preRange * 10:
            X = np.zeros((1, 9 * self.preRange))
            Y = np.zeros((1, self.preRange))
            for i in range(0, self.X.shape[0] - self.preRange * 10):
                X = np.vstack((X, raw[i : i + 9 * self.preRange].reshape((1, 9 * self.preRange))))
                Y = np.vstack((Y, raw[i + 9 * self.preRange : i + 10 * self.preRange].reshape((1, self.preRange))))
        else:
            warnings.warn("More data shall be added or what? I feel like not analyzing you. :/")
            warnings.warn("The data is probably not sufficient enough for the prediction.")
            warnings.warn("The maximum we can do is to predict %f" % (self.X.shape[0] / 4))
            warnings.warn("Anyway, I will produce a predictor based on the existing data")
            X = np.zeros((1, self.preRange))
            Y = np.zeros((1, self.preRange))
            ipdb.set_trace()
            for i in xrange(0, self.X.shape[0] - self.preRange * 1):
                X = np.vstack((X, raw[i : i + 1 * self.preRange].reshape((1, self.preRange))))
                Y = np.vstack((Y, raw[i + 1 * self.preRange : i + 2 * self.preRange].reshape((1, self.preRange))))
        X = X[1:, :].copy()
        Y = Y[1:, :].copy()

        randomize = 0
        if randomize:
            idx = np.random.permutation(X.shape[0])
            X = X[idx].copy()
            Y = Y[idx].copy()
        split_idx = int(np.floor(0.8 * X.shape[0]))
        training_X = X[:split_idx]
        validation_X = X[split_idx:]
        training_Y = Y[:split_idx]
        validation_Y = Y[split_idx:]
        return training_X, training_Y, validation_X, validation_Y
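The vstack loop grows X one row at a time, which is quadratic in the number of windows. numpy's sliding_window_view (numpy >= 1.20) builds the same feature and target matrices in one shot; a sketch of the first branch:

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

p = 7                                        # preRange
raw = np.arange(100, dtype=float)            # stand-in for one category column
windows = sliding_window_view(raw, 10 * p)   # each row: 10*p consecutive days
n = raw.shape[0] - 10 * p                    # number of windows the loop builds
X = windows[:n, : 9 * p]                     # first 9*p days -> features
Y = windows[:n, 9 * p :]                     # last p days -> targets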
Example #25
File: SS.py Project: KuoHaoZeng/VH
def testing_one(sess, video_feat_path, ixtoword, video_tf, video_mask_tf, caption_tf):
    pred_sent = []
    gt_sent = []
    # print video_feat_path
    test_data_batch = h5py.File(video_feat_path)
    gt_captions = test_data_batch["title"]

    video_mask = np.zeros((batch_size, n_frame_step))
    video_feat = np.transpose(test_data_batch["data"], [1, 0, 2])
    for ind in range(batch_size):
        idx = np.where(test_data_batch["label"][:, ind] != -1)[0]
        if len(idx) == 0:
            continue
        video_mask[ind, idx[-1]] = 1.0
    generated_word_index = sess.run(caption_tf, feed_dict={video_tf: video_feat, video_mask_tf: video_mask})
    # ipdb.set_trace()

    for ind in range(batch_size):
        generated_words = ixtoword[generated_word_index[ind]]
        punctuation = np.argmax(np.array(generated_words) == ".") + 1
        generated_words = generated_words[:punctuation]
        # ipdb.set_trace()
        generated_sentence = " ".join(generated_words)
        pred_sent.append([{"caption": generated_sentence}])
        gt_sent.append([{"caption": gt_captions[ind]}])
    return pred_sent, gt_sent
Example #26
def polint(xa, ya, x):
    n = len(xa)
    if len(xa) != len(ya):
        print("Input x and y arrays must be same length")
        return "Error"
    # Set up auxiliary arrays
    c = numpy.zeros(n, float)  # numpy here replaces the legacy Numeric module
    d = numpy.zeros(n, float)
    c[:] = ya[:]
    d[:] = ya[:]
    # Find closest table entry
    ns = 0
    diff = abs(xa[0] - x)
    for i in range(n):
        difft = abs(xa[i] - x)
        if difft < diff:
            diff = difft
            ns = i
    y = ya[ns]
    for m in range(1, n):
        for i in range(n - m):
            ho = xa[i] - x
            hp = xa[i + m] - x
            w = c[i + 1] - d[i]
            c[i] = ho * w / (ho - hp)
            d[i] = hp * w / (ho - hp)
        if 2 * ns < (n - m):
            dy = c[ns]
        else:
            ns -= 1
            dy = d[ns]
        y += dy
        # You can also return dy as an error estimate. Here
        # to keep things simple, we just return y.
    return y
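Neville's algorithm reproduces any polynomial of degree < n exactly from n samples, which makes a quick correctness check easy (the sample points are arbitrary):

xa = [0.0, 1.0, 2.0, 3.0]
ya = [(x - 1.0) ** 2 for x in xa]  # samples of a quadratic
print(polint(xa, ya, 1.5))         # 0.25, exactly (1.5 - 1)^2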
Example #27
def conv3d_oneToMany(x, xShape, w, wShape, strideT, strideY, strideX, inName):
    [ntp, nyp, nxp, nifp, nofp] = wShape
    [nb, nt, ny, nx, nf] = xShape

    # the weight and input sizes must be divisible by the stride
    assert ntp % strideT == 0
    assert nyp % strideY == 0
    assert nxp % strideX == 0
    assert nt % strideT == 0
    assert ny % strideY == 0
    assert nx % strideX == 0

    assert nifp == nf

    print "Building weight indices for conv3d"
    # Build gather indices for weights
    # Must be in shape of target output weights
    weightIdxs = np.zeros(
        (int(ntp / strideT), int(nyp / strideY), int(nxp / strideX), nifp, nofp * strideT * strideX * strideY, 5)
    ).astype(np.int32)
    # Adding kernel number to end of features
    for itp in range(ntp):
        for iyp in range(nyp):
            for ixp in range(nxp):
                for iifp in range(nifp):
                    for iofp in range(nofp):
                        # Calculate output indices given input indices
                        # Must reverse, as we're using conv2d as transpose conv2d
                        otp = int((ntp - itp - 1) / strideT)
                        oyp = int((nyp - iyp - 1) / strideY)
                        oxp = int((nxp - ixp - 1) / strideX)
                        oifp = iifp  # Input features stay the same
                        # oofp uses iofp as offset, plus an nf stride based on which kernel it belongs to
                        kernelIdx = (itp % strideT) * strideY * strideX + (iyp % strideY) * strideX + (ixp % strideX)
                        oofp = iofp + nofp * kernelIdx
                        weightIdxs[otp, oyp, oxp, oifp, oofp, :] = [itp, iyp, ixp, iifp, iofp]

    print "Building output indices for conv3d"
    # Build gather indices for output
    # Must be in shape of target output data
    dataIdxs = np.zeros((nb, nt * strideT, ny * strideY, nx * strideX, nofp, 5)).astype(np.int32)
    for oob in range(nb):
        for oot in range(nt * strideT):
            for ooy in range(ny * strideY):
                for oox in range(nx * strideX):
                    for oof in range(nofp):
                        # Calculate input indices given output indices
                        iib = oob
                        iit = oot // strideT
                        iiy = ooy // strideY
                        iix = oox // strideX
                        kernelIdx = (oot % strideT) * strideY * strideX + (ooy % strideY) * strideX + (oox % strideX)
                        iif = oof + nofp * kernelIdx
                        dataIdxs[oob, oot, ooy, oox, oof, :] = [iib, iit, iiy, iix, iif]

    # Build convolution structure
    w_reshape = tf.gather_nd(w, weightIdxs)
    o_reshape = tf.nn.conv3d(x, w_reshape, strides=[1, 1, 1, 1, 1], padding="SAME", name=inName)
    o = tf.gather_nd(o_reshape, dataIdxs)
    return o
Example #28
File: node.py Project: CamZHU/nengo
def build_node(model, node):
    # input signal
    if not is_array_like(node.output) and node.size_in > 0:
        sig_in = Signal(np.zeros(node.size_in), name="%s.in" % node)
        model.add_op(Reset(sig_in))
    else:
        sig_in = None

    # Provide output
    if node.output is None:
        sig_out = sig_in
    elif isinstance(node.output, Process):
        sig_out = Signal(np.zeros(node.size_out), name="%s.out" % node)
        model.build(node.output, sig_in, sig_out)
    elif callable(node.output):
        sig_out = Signal(np.zeros(node.size_out), name="%s.out" % node) if node.size_out > 0 else None
        model.add_op(SimPyFunc(output=sig_out, fn=node.output, t_in=True, x=sig_in))
    elif is_array_like(node.output):
        sig_out = Signal(node.output, name="%s.out" % node)
    else:
        raise ValueError("Invalid node output type '%s'" % (node.output.__class__.__name__))

    model.sig[node]["in"] = sig_in
    model.sig[node]["out"] = sig_out
    model.params[node] = None
Example #29
def make_video(events, t0=0.0, t1=None, dt_frame=0.01, tau=0.01):
    if t1 is None:
        t1 = events["t"].max()

    ts = events["t"]
    dt = 1e-3
    nt = int((t1 - t0) / dt) + 1
    # nt = min(nt, 1000)  # cap at 1000 for now

    image = np.zeros((128, 128))
    images = np.zeros((nt, 128, 128))

    for i in range(nt):
        # --- decay image
        image *= np.exp(-dt / tau) if tau > 0 else 0
        # image *= 0

        # --- add events
        ti = t0 + i * dt
        add_to_image(image, events[close(ts, ti)])

        images[i] = image

    # --- average in frames
    nt_frame = int(dt_frame / dt)
    nt_video = int(nt / nt_frame)

    video = np.zeros((nt_video, 128, 128))
    for i in range(nt_video):
        slicei = slice(i * nt_frame, (i + 1) * nt_frame)
        video[i] = np.sum(images[slicei], axis=0)

    return video
Example #30
def _mle(N, M, training_outputs, training_states, pseudo_initial, pseudo_transition, pseudo_emission):
    # p_initial is the probability that a sequence of states starts
    # off with a particular one.
    p_initial = numpy.zeros(N)
    if pseudo_initial is not None:
        p_initial = p_initial + pseudo_initial
    for states in training_states:
        p_initial[states[0]] += 1
    p_initial = _normalize(p_initial)

    # p_transition is the probability that a state leads to the next
    # one.  C(i,j)/C(i) where i and j are states.
    p_transition = numpy.zeros((N, N))
    if pseudo_transition is not None:
        p_transition = p_transition + pseudo_transition
    for states in training_states:
        for n in range(len(states) - 1):
            i, j = states[n], states[n + 1]
            p_transition[i, j] += 1
    for i in range(len(p_transition)):
        p_transition[i, :] = p_transition[i, :] / sum(p_transition[i, :])

    # p_emission is the probability of an output given a state.
    # C(s,o)/C(s) where o is an output and s is a state.
    # Default to add-one smoothing; explicit pseudocounts replace it.
    if pseudo_emission is not None:
        p_emission = numpy.zeros((N, M)) + pseudo_emission
    else:
        p_emission = numpy.ones((N, M))
    for outputs, states in zip(training_outputs, training_states):
        for o, s in zip(outputs, states):
            p_emission[s, o] += 1
    for i in range(len(p_emission)):
        p_emission[i, :] = p_emission[i, :] / sum(p_emission[i, :])

    return p_initial, p_transition, p_emission
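A tiny worked example: with two states and two outputs, the counts below can be checked by hand (this assumes _normalize simply divides a vector by its sum):

import numpy

training_states = [[0, 0, 1], [0, 1, 1]]   # two sequences over states {0, 1}
training_outputs = [[0, 1, 1], [0, 0, 1]]  # matching output sequences

p_i, p_t, p_e = _mle(2, 2, training_outputs, training_states, None, None, None)
print(p_i)  # [1. 0.]            -- both sequences start in state 0
print(p_t)  # row 0: [1/3, 2/3]  -- state 0 goes to 0 once and to 1 twice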