Example #1
1
    def rotate_and_bias(self, global_best, tc=0.1, b=0.5, cp=0.5):
        """
        Rotate the covariance matrix and bias the centroid of this CMA
        population towards a global best point. Can be used to implement a
        PSO-CMA hybrid algorithm.

        Parameters
        ----------
        global_best : swarm-level best position, in external coordinates.
        tc : threshold controlling when the full bias step ``b * pg`` is taken.
        b : fractional step towards the global best when within threshold.
        cp : blend weight between the old covariance and the rotated one.
        """

        # Work in internal (translated) coordinates throughout.
        global_best = self._translate_external(global_best)
        centroid = self._translate_external(self.centroid)

        # Rotate towards global:
        # pg points from our centroid to the global best; rotate the first
        # eigenvector (column 0 of B) onto pg and rebuild the covariance.
        # NOTE(review): '*' is used as matrix multiply here, which assumes
        # self.B / self.C are np.matrix instances -- confirm.
        pg = np.array(global_best) - np.array(centroid)
        Brot = self.__rotation_matrix(self.B[:, 0], pg) * self.B
        Crot = Brot * (self.diagD ** 2) * Brot.T
        self.C = cp * self.C + (1.0 - cp) * Crot

        # Bias our mean towards global best mean:
        npg = np.linalg.norm(pg)
        nsigma = np.amax(self.sigma)
        if nsigma < npg:
            # NOTE(review): this guard compares the ratio nsigma/npg against
            # tc * npg (not plain tc) -- verify against the intended
            # PSO-CMA biasing rule; looks dimensionally inconsistent.
            if nsigma / npg <= tc * npg:
                bias = b * pg
            else:
                # scale the step down proportionally to the step-size/distance ratio
                bias = nsigma / npg * pg
        else:
            bias = 0

        centroid = centroid + bias

        self.centroid = self._translate_internal(centroid)

        pass  # NOTE(review): redundant statement, safe to delete
def get_btchromas_loudness(h5):
    """
    Similar to btchroma, but adds the loudness back.
    We use the segments_loudness_max
    There is no max value constraint, simply no negative values.

    INPUT:
       h5        - filename or open h5 file
    RETURN:
       btchroma  - beat-aligned chroma with loudness applied,
                   or None if the alignment fails (e.g. no beats)
    """
    # if string, open the file ourselves (and close it when done);
    # otherwise assume an already-open h5 file owned by the caller
    opened = isinstance(h5, str)
    if opened:
        h5 = GETTERS.open_h5_file_read(h5)
    try:
        chromas = GETTERS.get_segments_pitches(h5)
        segstarts = GETTERS.get_segments_start(h5)
        btstarts = GETTERS.get_beats_start(h5)
        duration = GETTERS.get_duration(h5)
        loudnessmax = GETTERS.get_segments_loudness_max(h5)
    finally:
        # close even if a getter raises (the original leaked in that case)
        if opened:
            h5.close()
    # get the series of starts for segments and beats
    segstarts = np.array(segstarts).flatten()
    btstarts = np.array(btstarts).flatten()
    # add back loudness: idB undoes the dB scaling, one gain per segment
    chromas = chromas.T * idB(loudnessmax)
    # aligned features
    btchroma = align_feats(chromas, segstarts, btstarts, duration)
    if btchroma is None:
        return None
    # done (no renormalization)
    return btchroma
    def flatten(self, vectorlength=16):
        """
        Return a list of flattened n-element board vectors and a list of
        correct moves corresponding to the board states.

        Results are cached in ``self.games_flat`` / ``self.labels_flat`` and
        reused on subsequent calls.

        :param vectorlength: number of elements in one board vector.
        :return: (array of board vectors, array of corresponding moves)
        """

        log = logging.getLogger(__name__)
        log.info("Flattening datastructure...")

        if self.games_flat is None and self.labels_flat is None:
            out_games = np.array([], dtype="uint32")
            for game in self.games["boards"]:
                out_games = np.append(out_games, np.array([b for b in game], dtype="uint32"))

            # BUG FIX: use integer division -- '/' yields a float in
            # Python 3, which reshape() rejects.
            self.games_flat = out_games.reshape((len(out_games) // vectorlength, vectorlength))

            out_labels = np.array([], dtype="uint32")
            for label_set in self.games["moves"]:
                out_labels = np.append(out_labels, np.array(label_set))
            # BUG FIX: ndarray.flatten() returns a new array; the original
            # discarded it (harmless only because np.append already yields a
            # 1-D array -- assign the result to keep the intent explicit).
            self.labels_flat = out_labels.flatten()

            # Final check to see that we have matching board and move vectors
            assert len(self.games_flat) == len(self.labels_flat)

        return self.games_flat, self.labels_flat
Example #4
1
def get_tractor_fits_values(T, cat, pat):
    """Fill table ``T`` with per-source FITS columns derived from the tractor
    catalog ``cat``; ``pat`` is a '%s' format pattern used to build each
    column name (e.g. pat % "ra")."""
    # map each source's Python class to its FITS type string
    typearray = np.array([fits_typemap[type(src)] for src in cat])
    # If there are no "COMP" sources, the type will be 'S3' rather than 'S4'...
    typearray = typearray.astype("S4")
    T.set(pat % "type", typearray)

    # NOTE(review): the 'src is not None and ...' idiom stores False (not
    # NaN) for missing sources -- confirm downstream readers expect that.
    T.set(pat % "ra", np.array([src is not None and src.getPosition().ra for src in cat]))
    T.set(pat % "dec", np.array([src is not None and src.getPosition().dec for src in cat]))

    # per-source shape parameters and deVaucouleurs fraction, zero by default
    shapeExp = np.zeros((len(T), 3))
    shapeDev = np.zeros((len(T), 3))
    fracDev = np.zeros(len(T))

    for i, src in enumerate(cat):
        if isinstance(src, ExpGalaxy):
            shapeExp[i, :] = src.shape.getAllParams()
        elif isinstance(src, DevGalaxy):
            shapeDev[i, :] = src.shape.getAllParams()
            fracDev[i] = 1.0  # pure deVaucouleurs profile
        elif isinstance(src, FixedCompositeGalaxy):
            # composite: both shapes plus the mixing fraction
            shapeExp[i, :] = src.shapeExp.getAllParams()
            shapeDev[i, :] = src.shapeDev.getAllParams()
            fracDev[i] = src.fracDev.getValue()

    T.set(pat % "shapeExp", shapeExp.astype(np.float32))
    T.set(pat % "shapeDev", shapeDev.astype(np.float32))
    T.set(pat % "fracDev", fracDev.astype(np.float32))
    return
Example #5
1
def test_lcmv_raw():
    """Test LCMV with raw data

    Runs lcmv_raw over a 20 s window restricted to left-temporal channels
    and checks the output time span and that the source estimate contains
    only left-hemisphere vertices. Relies on module-level fixtures
    (raw, forward, label, fname_cov).
    """
    tmin, tmax = 0, 20
    # Setup for reading the raw data
    raw.info["bads"] = ["MEG 2443", "EEG 053"]  # 2 bads channels

    # Set up pick list: EEG + MEG - bad channels (modify to your needs)
    left_temporal_channels = mne.read_selection("Left-temporal")
    # NOTE(review): this first pick_types result is overwritten below and
    # never used -- looks like dead code; confirm and remove.
    picks = mne.fiff.pick_types(
        raw.info, meg=True, eeg=False, stim=True, eog=True, exclude="bads", selection=left_temporal_channels
    )

    noise_cov = mne.read_cov(fname_cov)
    # regularize the noise covariance to keep the beamformer stable
    noise_cov = mne.cov.regularize(noise_cov, raw.info, mag=0.05, grad=0.05, eeg=0.1, proj=True)

    start, stop = raw.time_as_index([tmin, tmax])

    # use only the left-temporal MEG channels for LCMV
    picks = mne.fiff.pick_types(raw.info, meg=True, exclude="bads", selection=left_temporal_channels)

    data_cov = mne.compute_raw_data_covariance(raw, tmin=tmin, tmax=tmax)

    stc = lcmv_raw(raw, forward, noise_cov, data_cov, reg=0.01, label=label, start=start, stop=stop, picks=picks)

    # the estimate should span [tmin, tmax] to within ~10 ms
    assert_array_almost_equal(np.array([tmin, tmax]), np.array([stc.times[0], stc.times[-1]]), decimal=2)

    # make sure we get an stc with vertices only in the lh
    vertno = [forward["src"][0]["vertno"], forward["src"][1]["vertno"]]
    assert_true(len(stc.vertno[0]) == len(np.intersect1d(vertno[0], label.vertices)))
    assert_true(len(stc.vertno[1]) == 0)
Example #6
1
def eccentricity(z_array=None, z_object=None, pt_array=None, pt_object=None):
    """
    Calculate the eccentricity (pi1/pi2) and its error for each frequency of
    a phase tensor, built from the first non-None input among z_array,
    z_object, pt_array and pt_object.

    Returns
    -------
    (np.array of eccentricities, np.array of errors); error entries are None
    when the underlying pi1/pi2 errors are unavailable.

    Raises
    ------
    MTex.MTpyError_Z / MTex.MTpyError_PT on invalid or missing input.
    """
    # BUG FIX: initialise pt_obj so that calling with no arguments raises the
    # intended MTpyError_PT below instead of an UnboundLocalError.
    pt_obj = None
    if z_array is not None:
        pt_obj = MTpt.PhaseTensor(z_array=z_array)
    elif z_object is not None:
        if not isinstance(z_object, MTz.Z):
            raise MTex.MTpyError_Z("Input argument is not an instance of the Z class")
        # BUG FIX: removed a leftover Python 2 debug print statement here
        # (a SyntaxError under Python 3).
        pt_obj = MTpt.PhaseTensor(z_object=z_object)
    elif pt_array is not None:
        pt_obj = MTpt.PhaseTensor(pt_array=pt_array)
    elif pt_object is not None:
        if not isinstance(pt_object, MTpt.PhaseTensor):
            raise MTex.MTpyError_PT("Input argument is not an instance of the PhaseTensor class")
        pt_obj = pt_object

    if not isinstance(pt_obj, MTpt.PhaseTensor):
        raise MTex.MTpyError_PT("Input argument is not an instance of the PhaseTensor class")

    lo_ecc = []
    lo_eccerr = []

    for idx_f in range(len(pt_obj.pt)):
        # eccentricity: ratio of the two phase-tensor invariants pi1/pi2
        lo_ecc.append(pt_obj._pi1()[0][idx_f] / pt_obj._pi2()[0][idx_f])

        # propagate relative errors in quadrature when both are available
        ecc_err = None
        if (pt_obj._pi1()[1] is not None) and (pt_obj._pi2()[1] is not None):
            ecc_err = np.sqrt(
                (pt_obj._pi1()[1][idx_f] / pt_obj._pi1()[0][idx_f]) ** 2
                + (pt_obj._pi2()[1][idx_f] / pt_obj._pi2()[0][idx_f]) ** 2
            )

        lo_eccerr.append(ecc_err)

    return np.array(lo_ecc), np.array(lo_eccerr)
def correlateTimeseries(A, B):
    """
    Correlate the 'price' columns of two time series sampled at (possibly)
    different dates.

    Parameters
    ----------
    A, B : pandas.DataFrame with (at least) 'date' and 'price' columns,
           sorted by date.

    Returns
    -------
    float : Pearson correlation coefficient of the date-matched,
            baseline-corrected, z-transformed price series.
    """
    # Convert the time series to relative time
    aDate = A["date"] - A["date"].iat[0]
    bDate = B["date"] - B["date"].iat[0]

    # For each sample of B, find the insertion index into A's timeline,
    # clamped to the last valid index of A.
    datesMatched = np.searchsorted(aDate, bDate)
    last = len(aDate) - 1
    datesMatched[datesMatched > last] = last

    # Select data according to matched indices
    keyword = "price"
    aReduced = np.array(A[keyword].values)[datesMatched]
    bReduced = np.array(B[keyword].values)
    # Correct to the baseline (remove the mean)
    aReduced = aReduced - np.mean(aReduced)
    bReduced = bReduced - np.mean(bReduced)
    # Perform the z-transformation (normalising by n-1 samples of A)
    zA = aReduced / np.sqrt(np.sum(np.square(aReduced)) / last)
    zB = bReduced / np.sqrt(np.sum(np.square(bReduced)) / last)
    # Calculate the correlation.
    # BUG FIX: pearsonr returns (coefficient, p-value); the original
    # returned r[1] -- the p-value -- despite the function's name and the
    # comment above. Return the correlation coefficient instead.
    r = pearsonr(zA, zB)
    return r[0]
Example #8
0
def test_check_symmetric():
    """check_symmetric: invalid input raises, asymmetric input warns (or
    raises on request) and is symmetrized with the sparse format preserved."""
    arr_sym = np.array([[0, 1], [1, 2]])
    arr_bad = np.ones(2)
    arr_asym = np.array([[0, 2], [0, 2]])

    # the same asymmetric matrix, dense and in every sparse storage format
    test_arrays = {"dense": arr_asym}
    for fmt in ("dok", "csr", "csc", "coo", "lil", "bsr"):
        test_arrays[fmt] = getattr(sp, fmt + "_matrix")(arr_asym)

    # a 1-D array is not a valid input at all
    assert_raises(ValueError, check_symmetric, arr_bad)

    for fmt, candidate in test_arrays.items():
        # asymmetric input warns by default and raises when asked to
        assert_warns(UserWarning, check_symmetric, candidate)
        assert_raises(ValueError, check_symmetric, candidate, raise_exception=True)

        symmetrized = check_symmetric(candidate, raise_warning=False)
        if sp.issparse(symmetrized):
            # sparse output keeps the input's storage format
            assert_equal(symmetrized.format, fmt)
            assert_array_equal(symmetrized.toarray(), arr_sym)
        else:
            assert_array_equal(symmetrized, arr_sym)
Example #9
0
    def test_fullWrapping(self):
        """
        Test basic wrapping functionality (all slots are promoted)
        """
        # wrapping OpSimple with no promotedSlotNames promotes every slot
        # to level 1 (a list of lanes)
        wrapped = OperatorWrapper(OpSimple, graph=self.graph)
        assert type(wrapped.InputA) == InputSlot
        assert type(wrapped.InputB) == InputSlot
        assert type(wrapped.Output) == OutputSlot
        assert wrapped.InputA.level == 1
        assert wrapped.InputB.level == 1
        assert wrapped.Output.level == 1

        # no lanes exist until an input is resized
        assert len(wrapped.InputA) == 0
        assert len(wrapped.InputB) == 0
        assert len(wrapped.Output) == 0

        # resizing one promoted input must propagate to all promoted slots
        wrapped.InputA.resize(2)
        assert len(wrapped.InputB) == 2
        assert len(wrapped.Output) == 2

        # feed each lane its own inputs
        a = numpy.array([[1, 2], [3, 4]])
        b = numpy.array([2])
        wrapped.InputA[0].setValue(a)
        wrapped.InputB[0].setValue(b)
        wrapped.InputA[1].setValue(2 * a)
        wrapped.InputB[1].setValue(3 * b)

        # per the asserts below, OpSimple's output is InputA * InputB[0]
        result0 = wrapped.Output[0][0:2, 0:2].wait()
        result1 = wrapped.Output[1][0:2, 0:2].wait()
        assert (result0 == a * b[0]).all()
        assert (result1 == 2 * a * 3 * b[0]).all()
Example #10
0
def breakTrans(T):
    """Split a 4x4 homogeneous transformation into its rotation matrix R
    (3x3) and translation column vector p (3x1); raise RuntimeError for any
    other shape."""
    if T.shape[0] == 4 and T.shape[1] == 4:
        R = np.array([[T[row][col] for col in range(3)] for row in range(3)])
        p = np.array([[T[row][3]] for row in range(3)])
        return (R, p)
    raise RuntimeError("Invalid transformation matrix shape:" + str(T.shape))
Example #11
0
def draw_floor(shadow_flag):
    """Draw the checkerboard floor; shadow_flag == 1 selects the darker
    material palette used for the shadow pass."""
    global floor_normal_vec
    # ========== floor drawing ==========
    # [memo]
    # A plane with normal (a,b,c) through point P(x0,y0,z0) can be written
    # as ax+by+cz+d=0, with
    # d = a*(-x0) + b*(-y0) + c*(-z0)
    # ===================================
    if shadow_flag == 1:
        # glEnable(GL_BLEND)
        # glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        # glColor4f(1.0, 0.1, 0.1, 1.0)
        # darker two-tone palette for the shadow pass
        param_floor = np.array([[0.3, 0.3, 0.3, 1.0], [0.0, 0.0, 0.0, 1.0]])
    else:
        param_floor = np.array([[0.6, 0.6, 0.6, 1.0], [0.3, 0.3, 0.3, 1.0]])
    # draw the floor as (an attempt at) a tiled pattern
    glEnable(GL_NORMALIZE)
    glBegin(GL_QUADS)
    glNormal3fv(floor_normal_vec)
    for i in range(-5, 4):
        for j in range(-5, 4):
            # alternate the two materials in a checkerboard pattern;
            # each tile is a 0.1 x 0.1 quad in the y=0 plane
            glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, param_floor[(i + j) % 2])
            glVertex3f(i / 10.0, 0.0, j / 10.0)
            glVertex3f(i / 10.0, 0.0, j / 10.0 + 0.1)
            glVertex3f(i / 10.0 + 0.1, 0.0, j / 10.0 + 0.1)
            glVertex3f(i / 10.0 + 0.1, 0.0, j / 10.0)
    glEnd()
    # if shadow_flag == 1:
    # glDisable(GL_BLEND)
    glDisable(GL_NORMALIZE)
Example #12
0
    def test_procrustes2(self):
        """procrustes disparity should not depend on order of matrices"""
        # 2-D fixtures from setUp: the disparity must be symmetric in its
        # two arguments
        m1, m3, disp13 = procrustes(self.data1, self.data3)
        m3_2, m1_2, disp31 = procrustes(self.data3, self.data1)
        self.assertFloatEqual(disp13, disp31)

        # try with 3d, 8 pts per
        rand1 = array(
            [
                [2.61955202, 0.30522265, 0.55515826],
                [0.41124708, -0.03966978, -0.31854548],
                [0.91910318, 1.39451809, -0.15295084],
                [2.00452023, 0.50150048, 0.29485268],
                [0.09453595, 0.67528885, 0.03283872],
                [0.07015232, 2.18892599, -1.67266852],
                [0.65029688, 1.60551637, 0.80013549],
                [-0.6607528, 0.53644208, 0.17033891],
            ]
        )

        rand3 = array(
            [
                [0.0809969, 0.09731461, -0.173442],
                [-1.84888465, -0.92589646, -1.29335743],
                [0.67031855, -1.35957463, 0.41938621],
                [0.73967209, -0.20230757, 0.52418027],
                [0.17752796, 0.09065607, 0.29827466],
                [0.47999368, -0.88455717, -0.57547934],
                [-0.11486344, -0.12608506, -0.3395779],
                [-0.86106154, -0.28687488, 0.9644429],
            ]
        )
        # same symmetry property must hold for arbitrary 3-D point sets
        res1, res3, disp13 = procrustes(rand1, rand3)
        res3_2, res1_2, disp31 = procrustes(rand3, rand1)
        self.assertFloatEqual(disp13, disp31)
Example #13
0
    def setUp(self):
        """Build the fixture matrices shared by the procrustes tests."""
        # an L-shape
        self.data1 = array([[1, 3], [1, 2], [1, 1], [2, 1]], "d")

        # the same L, enlarged, shifted and mirrored
        self.data2 = array([[4, -2], [4, -4], [4, -6], [2, -6]], "d")

        # an L shifted up 1, right 1, with point 4 shifted an extra .5 right;
        # pointwise distance disparity with data1: 3*(2) + (1 + 1.5^2)
        self.data3 = array([[2, 4], [2, 3], [2, 2], [3, 2.5]], "d")

        # data4/data5 are standardized (trace(A*A') = 1); procrustes should
        # return an identical copy when either is the first matrix argument.
        angle = pi / 8
        self.data4 = array([[1, 0], [0, 1], [-1, 0], [0, -1]], "d") / sqrt(4)
        rotated_square = [
            [cos(angle), sin(angle)],
            [cos(pi / 2 - angle), sin(pi / 2 - angle)],
            [-cos(angle), -sin(angle)],
            [-cos(pi / 2 - angle), -sin(pi / 2 - angle)],
        ]
        self.data5 = array(rotated_square, "d") / sqrt(4)
Example #14
0
def test_patterns_reduced_case():
    """ Less sources than signals: the pattern computation should recover
    the mixing coefficients, while plain matrix inversion need not."""
    # sources chosen so that the added noise is uncorrelated with them
    src_a = np.array([1, 3, 1, 4, 8, 1])
    src_b = np.array([1, 5, 2, 4, -2, 3])
    source = np.vstack((src_a, src_b))
    noise = [
        [0.4, 0.3, -0.5, -0.2, 0.2, 0.5],
        [-0.3, -0.5, +0.5, +0.4, -0.2, -0.5],
        [-0.2, -0.5, +0.5, +0.4, -0.2, -0.5],
    ]
    # mix the two sources into three noisy signals
    signal = np.vstack(
        (
            0.2 * src_a + 0.5 * src_b + noise[0],
            0.8 * src_a - 0.2 * src_b + noise[1],
            0.5 * src_a - 0.3 * src_b + noise[2],
        )
    )
    # least-squares unmixing weights and the resulting reconstruction
    W, _, _, _ = np.linalg.lstsq(signal.T, source.T)
    reconstructed = np.dot(W.T, signal)
    patterns = transform_weights_to_patterns(W.T, signal, reconstructed)
    # Note columns are  close to given mixing 0.2/0.5, 0.8/-0.2, 0.5/-0.3
    assert np.allclose([[0.18594579, 0.80136242, 0.49412571], [0.52567645, -0.25482237, -0.36055654]], patterns)
    # Also compare to solution by inversion
    pattern_by_inversion = np.linalg.pinv(W)
    assert not np.allclose(pattern_by_inversion, patterns), (
        "In reduced case "
        "inversion does not have to lead to same result as patterns and "
        "in this case it shouldn't."
    )
    def create_binary_tree(self):
        """
        Create a binary Huffman tree using stored vocabulary word counts. Frequent words
        will have shorter binary codes. Called internally from `build_vocab()`.

        Side effects: sets ``code`` and ``point`` on every leaf Vocab entry.
        """
        logger.info("constructing a huffman tree from %i words" % len(self.vocab))

        # build the huffman tree by repeatedly merging the two least
        # frequent nodes.
        # BUG FIX: dict.values() is a non-indexable view in Python 3;
        # materialise it into a list so heapq can mutate it.
        heap = list(self.vocab.values())
        heapq.heapify(heap)
        for i in range(len(self.vocab) - 1):  # range() replaces Python 2 xrange()
            min1, min2 = heapq.heappop(heap), heapq.heappop(heap)
            heapq.heappush(heap, Vocab(count=min1.count + min2.count, index=i + len(self.vocab), left=min1, right=min2))

        # recurse over the tree, assigning a binary code to each vocabulary word
        if heap:
            max_depth, stack = 0, [(heap[0], [], [])]
            while stack:
                node, codes, points = stack.pop()
                if node.index < len(self.vocab):
                    # leaf node => store its path from the root
                    node.code, node.point = codes, points
                    max_depth = max(len(codes), max_depth)
                else:
                    # inner node => continue recursion; left edge appends 0,
                    # right edge appends 1 to the running code
                    points = array(list(points) + [node.index - len(self.vocab)], dtype=uint32)
                    stack.append((node.left, array(list(codes) + [0], dtype=uint8), points))
                    stack.append((node.right, array(list(codes) + [1], dtype=uint8), points))

            logger.info("built huffman tree with maximum node depth %i" % max_depth)
Example #16
0
def survey_footprint(ax, survey_filename, FoV):
    """ Plot the survey footprint map on the given matplotlib axes instance.

    Input
    -----
    ax                      :       matplotlib axes instance
        The axes instance to plot the footprint map to
    survey_filename         :       string
        The path to the survey file
    FoV                     :       sequence of two floats
        Width and height of a single image's field of view
    """
    survey = np.loadtxt(survey_filename)

    # corners of one field of view, traced as a closed rectangle
    half_width = FoV[0] / 2
    half_height = FoV[1] / 2
    corners_x = np.array([-half_width, -half_width, half_width, half_width, -half_width])
    corners_y = np.array([-half_height, half_height, half_height, -half_height, -half_height])

    # project the rectangle onto the sky for every pointing in the survey
    for image in survey:
        alpha, beta = transformations.fp2sky(corners_x, corners_y, image[1:3], image[3])
        ax.plot(alpha, beta, "k-", alpha=0.5)

    return None
 def B_F(self, K, u):  # right-hand side of the Euler equation
     """Return f with f[j] = self.hX * sum_i K[i, j] * u[i], i.e. a
     rectangle-rule quadrature of each kernel column against u with
     step self.hX."""
     K = array(K)
     u = array(u)
     f = []
     for j in range(self.n):  # range() replaces Python 2 xrange()
         f.append((K[:, j] * u).sum() * self.hX)
     return array(f)
Example #18
0
    def test_partialWrapping(self):
        """
        By default, OperatorWrapper promotes all slots.
        This function tests what happens when only a subset of the inputs are promoted.
        """
        # only InputA is promoted; outputs are always promoted
        wrapped = OperatorWrapper(OpSimple, graph=self.graph, promotedSlotNames=set(["InputA"]))
        assert type(wrapped.InputA) == InputSlot
        assert type(wrapped.InputB) == InputSlot
        assert type(wrapped.Output) == OutputSlot
        assert wrapped.InputA.level == 1  # Promoted because it was listed in the constructor call
        assert wrapped.InputB.level == 0  # NOT promoted
        assert wrapped.Output.level == 1  # Promoted because it's an output

        # no lanes exist until an input is resized
        assert len(wrapped.InputA) == 0
        assert len(wrapped.InputB) == 0
        assert len(wrapped.Output) == 0

        # resizing the promoted input adds lanes to outputs, but the
        # unpromoted InputB stays a single (level-0) slot
        wrapped.InputA.resize(2)
        assert len(wrapped.InputB) == 0  # Not promoted
        assert len(wrapped.Output) == 2

        # InputB is shared across lanes; each lane gets its own InputA
        a = numpy.array([[1, 2], [3, 4]])
        b = numpy.array([2])
        wrapped.InputA[0].setValue(a)
        wrapped.InputB.setValue(b)
        wrapped.InputA[1].setValue(2 * a)

        # per the asserts below, each lane's output is InputA * InputB[0]
        result0 = wrapped.Output[0][0:2, 0:2].wait()
        result1 = wrapped.Output[1][0:2, 0:2].wait()
        assert (result0 == a * b[0]).all()
        assert (result1 == 2 * a * b[0]).all()
Example #19
0
    def __init__(self, num_neurons, prev_layer=None):
        """Constructs a layer with given number of neurons.

        Args:
            num_neurons: Number of neurons in this layer.
            prev_layer: Previous layer which acts as input to this
                        layer. None for input layer.
        """

        # x : Activation vector of the neurons.
        # nets : Vector of weighted sum of inputs of the neurons.
        # deltas : Delta error vector, used to adjust the weights.
        # NOTE(review): [0] * n builds integer arrays; in-place element
        # writes of floats would truncate -- confirm these vectors are only
        # ever replaced wholesale (which upcasts) and never written per-element.
        self.x = np.array([0] * num_neurons)
        self.nets = np.array([0] * num_neurons)
        self.deltas = np.array([0] * num_neurons)

        self.prev_layer = prev_layer

        # If previous layer exists, create a weight matrix
        # with random values.
        if prev_layer:
            self.weights = []
            for i in range(num_neurons):

                # Each neuron is connected to all neurons of previous layer
                # plus a constant input of '1' (the weight of which is
                # bias). So total number of weights = num_inputs + 1.

                prev_x_len = len(prev_layer.x) + 1
                w = [get_random_weight(prev_x_len) for _ in range(prev_x_len)]
                self.weights.append(w)

            # rows = neurons in this layer, cols = inputs (+ bias)
            self.weights = np.matrix(self.weights)
Example #20
0
    def get_mask_slice(self, orientation, slice_number):
        """
        Return the mask slice for ``orientation`` ('AXIAL', 'CORONAL' or
        'SAGITAL') at ``slice_number``, generating (thresholding) it on
        demand and caching the generated flag in the mask matrix.

        Raises ValueError for an unknown orientation.
        """
        # It's necessary because the first position for each dimension from
        # mask matrix is used as flags to control if the mask in the
        # slice_number position has been generated.
        if self.buffer_slices[orientation].index == slice_number and self.buffer_slices[orientation].mask is not None:
            return self.buffer_slices[orientation].mask
        n = slice_number + 1  # +1 skips the flag row/column/plane
        if orientation == "AXIAL":
            if self.current_mask.matrix[n, 0, 0] == 0:
                # slice not generated yet: threshold it in place and mark done
                mask = self.current_mask.matrix[n, 1:, 1:]
                mask[:] = self.do_threshold_to_a_slice(self.get_image_slice(orientation, slice_number), mask)
                self.current_mask.matrix[n, 0, 0] = 1
            n_mask = numpy.array(self.current_mask.matrix[n, 1:, 1:], dtype=self.current_mask.matrix.dtype)

        elif orientation == "CORONAL":
            if self.current_mask.matrix[0, n, 0] == 0:
                mask = self.current_mask.matrix[1:, n, 1:]
                mask[:] = self.do_threshold_to_a_slice(self.get_image_slice(orientation, slice_number), mask)
                self.current_mask.matrix[0, n, 0] = 1
            n_mask = numpy.array(self.current_mask.matrix[1:, n, 1:], dtype=self.current_mask.matrix.dtype)

        elif orientation == "SAGITAL":
            if self.current_mask.matrix[0, 0, n] == 0:
                mask = self.current_mask.matrix[1:, 1:, n]
                mask[:] = self.do_threshold_to_a_slice(self.get_image_slice(orientation, slice_number), mask)
                self.current_mask.matrix[0, 0, n] = 1
            n_mask = numpy.array(self.current_mask.matrix[1:, 1:, n], dtype=self.current_mask.matrix.dtype)

        else:
            # BUG FIX: an unknown orientation previously fell through and
            # raised a confusing NameError on n_mask below.
            raise ValueError("Unknown orientation: %s" % orientation)

        return n_mask
Example #21
0
    def route_flow(self):
        """Route flow across lake flats.

        Route flow across lake flats, which have already been identified.
        For each lake outlet, grows a wavefront from the outlet inwards,
        assigning every lake node a receiver, until all lake nodes are
        routed; finally clears the sink flags on the filled pits.
        """
        for outlet_node in self.lake_outlets:
            # all nodes whose depression drains to this outlet form one lake
            nodes_in_lake = np.where(self.depression_outlet == outlet_node)[0]
            if len(nodes_in_lake) > 0:
                nodes_routed = np.array([outlet_node])
                # ^using set on assumption of cythonizing later
                nodes_on_front = np.array([outlet_node])
                self.handle_outlet_node(outlet_node, nodes_in_lake)
                # loop until every lake node (plus the outlet) is routed
                while (len(nodes_in_lake) + 1) != len(nodes_routed):
                    # orthogonal + diagonal neighbors of the current front
                    all_neighbors = np.hstack(
                        (self._grid.get_neighbor_list(nodes_on_front), self._grid.get_diagonal_list(nodes_on_front))
                    )
                    # discard neighbors outside the lake...
                    outlake = np.logical_not(np.in1d(all_neighbors.flat, nodes_in_lake))
                    all_neighbors[outlake.reshape(all_neighbors.shape)] = -1
                    # ...and neighbors already routed (prevents backflow)
                    backflow = np.in1d(all_neighbors, nodes_routed)
                    all_neighbors[backflow.reshape(all_neighbors.shape)] = -1
                    (drains_from, unique_indices) = np.unique(all_neighbors, return_index=True)
                    # ^gets flattened, but, usefully, unique_indices are *in order*
                    # remember, 1st one is always -1
                    # I bet this is sloooooooooow
                    # //8 maps a flat neighbor index back to its front node
                    # (8 = 4 orthogonal + 4 diagonal neighbors per node)
                    drains_to = nodes_on_front[unique_indices[1:] // 8]
                    # to run the accumulator successfully, we need receivers, and
                    # sinks only. So the priority is sorting out the receiver
                    # field, and sealing the filled sinks (once while loop is done)
                    self.receivers[drains_from[1:]] = drains_to
                    # now put the relevant nodes in the relevant places:
                    nodes_on_front = drains_from[1:]
                    nodes_routed = np.union1d(nodes_routed, nodes_on_front)
                    self.grads[drains_from[1:]] = 0.0  # downstream grad is 0.
        # lakes are now filled and routed, so their pits are no longer sinks
        self.sinks[self.pit_node_ids] = False
Example #22
0
    def setUp(self):
        """Create a 4-D DataArray fixture with string labels on the
        'col' and 'row' dimensions."""
        darray = DataArray(easy_array((10, 15, 3, 2)), dims=["y", "x", "col", "row"])
        # label each facet dimension as e.g. 'col0', 'col1', ... / 'row0', ...
        for dim in ("col", "row"):
            darray.coords[dim] = np.array([dim + str(v) for v in darray.coords[dim].values])

        self.darray = darray
    def test_no_development_with_zero_target_vacancy(self):
        """If the target vacancy rates are 0%, then no development should occur and thus,
        the results returned (which represents development projects) should be empty.
        In fact anytime the target vacancy rate is strictly less than the current vacancy rate,
        then no development should ever occur.
        """

        """specify that the target vacancies for the year 2000 should be 0% for both
        residential and non-residential. with these constrains, no new development projects
        should be spawned for any set of agents."""
        # write a target_vacancies table with 0% vacancy for all three
        # building types under test
        self.storage.write_table(
            table_name="target_vacancies",
            table_data={
                "year": array([2000, 2000, 2000]),
                "building_type_id": array([1, 2, 4]),
                "occupied_units": array(["occupied_sqft", "occupied_sqft", "number_of_households"]),
                "total_units": array(["non_residential_sqft", "non_residential_sqft", "residential_units"]),
                "target_vacancy": array([0.0, 0, 0]),
            },
        )

        # run the transition model for year 2000 against the zero targets
        dptm = RealEstateTransitionModel(target_vancy_dataset=self.dataset_pool.get_dataset("target_vacancy"))
        results, index = dptm.run(
            realestate_dataset=self.dataset_pool.get_dataset("building"),
            year=2000,
            occupied_spaces_variable="occupied_units",
            total_spaces_variable="total_units",
            target_attribute_name="target_vacancy",
            sample_from_dataset=self.dataset_pool.get_dataset("development_event_history"),
            dataset_pool=self.dataset_pool,
            resources=self.compute_resources,
        )

        # no development projects should have been created
        self.assertEqual(results, None, "Nothing should've been added/developed")
Example #24
0
    def eval_hulls(self, x):
        """
        Evaluate the piecewise-linear lower and upper hulls at ``x``.

        Each hull is a list of segments with attributes ``left``, ``right``
        (the segment's x-interval) and ``m``, ``b`` (slope/intercept).

        Returns
        -------
        (lower_value, upper_value): the lower value is -inf when x lies
        outside the lower hull's support; either value is None when x falls
        in a gap not covered by any segment.
        """
        lowerHull = self.lower_hull
        upperHull = self.upper_hull

        # lower bound: -inf outside the hull's support, otherwise the value
        # of the segment containing x
        lhVal = None
        if x < np.amin(np.array([h.left for h in lowerHull])):
            # BUG FIX: use np.inf -- the np.Inf alias was removed in NumPy 2.0
            lhVal = -np.inf
        elif x > np.amax(np.array([h.right for h in lowerHull])):
            lhVal = -np.inf
        else:
            for h in lowerHull:
                if h.left <= x <= h.right:
                    lhVal = h.m * x + h.b
                    break

        # upper bound: value of the covering segment, None if uncovered
        uhVal = None
        for h in upperHull:
            if h.left <= x <= h.right:
                uhVal = h.m * x + h.b
                break

        return lhVal, uhVal
def get_bttimbre(h5):
    """
    Get beat-aligned timbre from a song file of the Million Song Dataset
    INPUT:
       h5          - filename or open h5 file
    RETURN:
       bttimbre    - beat-aligned timbre, one beat per column
                     or None if something went wrong (e.g. no beats)
    """
    # if string, open the file ourselves (and close it when done);
    # otherwise assume an already-open h5 file owned by the caller
    opened = isinstance(h5, str)
    if opened:
        h5 = GETTERS.open_h5_file_read(h5)
    try:
        timbre = GETTERS.get_segments_timbre(h5)
        segstarts = GETTERS.get_segments_start(h5)
        btstarts = GETTERS.get_beats_start(h5)
        duration = GETTERS.get_duration(h5)
    finally:
        # close even if a getter raises (the original leaked in that case)
        if opened:
            h5.close()
    # get the series of starts for segments and beats
    # NOTE: MAYBE USELESS?
    # result for track: 'TR0002Q11C3FA8332D'
    #    segstarts.shape = (708,)
    #    btstarts.shape = (304,)
    segstarts = np.array(segstarts).flatten()
    btstarts = np.array(btstarts).flatten()
    # aligned features (one segment per column after transpose)
    bttimbre = align_feats(timbre.T, segstarts, btstarts, duration)
    if bttimbre is None:
        return None
    # done (no renormalization)
    return bttimbre
Example #26
0
def _create_eeg_el(ch, t=None):
    """Create an electrode definition, transform coords if necessary

    Parameters
    ----------
    ch : dict
        Channel info for an EEG channel (kind must be FIFFV_EEG_CH).
    t : Transform | None
        head->head coordinate transform; identity is used when None.

    Returns
    -------
    dict
        Electrode "coil" definition: locations (rmag), unit directions
        (cosmag), weights (w) and metadata.
    """
    if ch["kind"] != FIFF.FIFFV_EEG_CH:
        raise RuntimeError("%s is not an EEG channel. Cannot create an " "electrode definition." % ch["ch_name"])
    if t is None:
        t = Transform("head", "head", np.eye(4))  # identity, no change
    if t.from_str != "head":
        raise RuntimeError("Inappropriate coordinate transformation")

    # one location column -> no reference electrode; two columns -> the
    # second is the reference and gets weight -1
    r0ex = _loc_to_eeg_loc(ch["loc"])
    if r0ex.shape[1] == 1:  # no reference
        w = np.array([1.0])
    else:  # has reference
        w = np.array([1.0, -1.0])

    # Optional coordinate transformation
    r0ex = apply_trans(t["trans"], r0ex.T)

    # The electrode location; cosmag holds the normalized direction vectors
    cosmag = r0ex.copy()
    _normalize_vectors(cosmag)
    res = dict(
        chname=ch["ch_name"],
        coil_class=FIFF.FWD_COILC_EEG,
        w=w,
        accuracy=_accuracy_dict["normal"],
        type=ch["coil_type"],
        coord_frame=t["to"],
        rmag=r0ex,
        cosmag=cosmag,
    )
    return res
Example #27
0
def display_ct(pt, norm, fr_Lz):
    """Draw a point at ``pt`` with an arrow oriented along ``norm`` and the
    label string ``fr_Lz`` next to it (uses the current GL context)."""
    glMatrixMode(GL_MODELVIEW)
    glPushMatrix()
    glTranslatef(pt[0], pt[1], pt[2])
    # the point itself
    glBegin(GL_POINTS)
    glVertex3f(0.0, 0.0, 0.0)
    glEnd()
    # the label, slightly offset and scaled down
    glPushMatrix()
    glTranslatef(0.1, 0.1, 0.0)
    glScalef(0.004, 0.004, 0.004)
    display_string(fr_Lz)
    glPopMatrix()
    # rotate the +x axis onto `norm` so the arrow points along the normal.
    # NOTE(review): if norm is (anti)parallel to +x, the cross product is
    # the zero vector and glRotatef receives a degenerate axis -- confirm
    # callers never pass that case.
    xaxis = numpy.array([1, 0, 0])
    norma = numpy.array([norm[i] for i in range(3)])
    rotaxis = numpy.cross(norma, xaxis)
    ang = numpy.arccos(numpy.dot(norma, xaxis))
    glRotatef(-ang * 180.0 / math.pi, rotaxis[0], rotaxis[1], rotaxis[2])
    #    ang = math.atan2(norm[1], norm[0]) * 360.0 / (2.0*3.141593)
    #    glRotatef(ang, 0.0, 0.0, 1.0)
    # arrow shaft
    glBegin(GL_LINES)
    glVertex3f(0.0, 0.0, 0.0)
    glVertex3f(1.0, 0.0, 0.0)
    glEnd()
    # arrow head
    glBegin(GL_TRIANGLES)
    glVertex3f(1.0, 0.0, 0.0)
    glVertex3f(0.8, 0.2, 0.0)
    glVertex3f(0.8, -0.2, 0.0)
    glEnd()
    glPopMatrix()
def computeDistanceMus21(melody_a, melody_b, parameters=("rhythm", "harmony", "contour")):
    """
    Compute a DTW-based distance between two music21 melodies, as the sum of
    the component distances enabled by ``parameters`` ("rhythm", "harmony",
    "contour").
    """
    harmony_distance, rhythm_distance, contour_distance = 0, 0, 0

    # BUG FIX: any() takes a single iterable; the original passed two
    # positional arguments, which raises a TypeError.
    if any(p in parameters for p in ("harmony", "contour")):
        # materialise as lists: Python 3's map() is a one-shot iterator and
        # these sequences are reused / indexed below
        melody_a_midinotes = [p.midi for p in melody_a.pitches]
        melody_b_midinotes = [p.midi for p in melody_b.pitches]

    if "rhythm" in parameters:
        melody_a_durs = [n.duration.quarterLength for n in melody_a.flat.notes]
        melody_b_durs = [n.duration.quarterLength for n in melody_b.flat.notes]

    if "harmony" in parameters:
        harmony_distance, _ = DTW(melody_a_midinotes, melody_b_midinotes)

    if "rhythm" in parameters:
        rhythm_distance, _ = DTW(melody_a_durs, melody_b_durs)

    if "contour" in parameters:
        lick1_np = np.array(melody_a_midinotes)
        lick2_np = np.array(melody_b_midinotes)

        # interval (pitch-step) sequences
        lick1_intervals = lick1_np[1:] - lick1_np[:-1]
        lick2_intervals = lick2_np[1:] - lick2_np[:-1]

        contour_distance, _ = DTW(lick1_intervals, lick2_intervals, intervalicDistance)

    return harmony_distance + rhythm_distance + contour_distance
Example #29
0
    def transform_to_alternate_representation(self):
        """
        Converts the board vectors to a different representation based on some metric
        (the module-level ``transform`` function), updating
        ``self.games["boards"]`` and the flattened caches
        ``self.games_flat`` / ``self.labels_flat``.
        """

        log = logging.getLogger(__name__)
        log.info("Transforming boards to alternate representation...")

        new_format = []
        self.games_flat = []
        total_games = len(self.games["boards"])
        for i, game in enumerate(self.games["boards"]):
            if i % 10 == 0:  # progress indicator every 10 games
                print("Transforming game %d of %d" % (i, total_games))

            g = [transform(board) for board in game]
            new_format.append(g)
            self.games_flat.extend(g)

        self.games["boards"] = new_format
        print("Completed transforming games")

        # Update the flat version as well
        self.games_flat = np.array(self.games_flat)
        out_labels = np.array([], dtype="uint32")
        print("Starting on labels")
        for label_set in self.games["moves"]:
            out_labels = np.append(out_labels, np.array(label_set))
        # BUG FIX: ndarray.flatten() returns a copy; the original discarded
        # it (harmless only because np.append already yields a 1-D array --
        # assign the result to keep the intent explicit).
        self.labels_flat = out_labels.flatten()
Example #30
0
File: SS.py Project: KuoHaoZeng/VH
def get_video_data_jukin(video_data_path_train, video_data_path_val, video_data_path_test):
    """
    Collect fnames and titles from the train/val/test HDF5 batch lists.

    Returns (video_data DataFrame with a 'Description' column, train list,
    val list, test list).

    NOTE(review): relies on the module-level ``video_feat_path`` global, and
    the h5py files are opened without being closed -- consider a context
    manager if file handles become an issue.
    """
    title = []
    fname = []

    def _collect(video_list):
        # accumulate fname/title pairs from every batch file in the list
        for ele in video_list:
            batch_data = h5py.File(ele)
            batch_fname = batch_data["fname"]
            batch_title = batch_data["title"]
            for i in range(len(batch_fname)):  # range() replaces Python 2 xrange()
                fname.append(batch_fname[i])
                title.append(batch_title[i])

    # identical gathering logic for the three splits (was triplicated inline)
    video_list_train = get_video_data_HL(video_data_path_train, video_feat_path)
    _collect(video_list_train)
    video_list_val = get_video_data_HL(video_data_path_val, video_feat_path)
    _collect(video_list_val)
    video_list_test = get_video_data_HL(video_data_path_test, video_feat_path)
    _collect(video_list_test)

    fname = np.array(fname)
    title = np.array(title)
    video_data = pd.DataFrame({"Description": title})

    return video_data, video_list_train, video_list_val, video_list_test