Example #1
    def run(self, data, label):
        """
        Init samples
        for i in range(iterations):
            Update posterior distribution by gaussian process.
            Find the next sample maximizing acquisition function.
            Add new sample to the original samples.
        The last new sample is the best result.
        """

        # initial parameters
        samples, ys = self.init_samples(data, label, 10)
        optimal_y = np.min(ys)

        # define the gp
        gp = self.base_estimator
        for _ in range(self.max_iterations):

            # Fit gp on the hyperparameters.
            gp.fit(samples, ys)

            # Sample next hyperparameter.
            next_sample, y = self.propose_next_sample(data, label, optimal_y,
                                                      gp)

            # Update samples
            samples = np.r_[samples, next_sample]
            ys = np.r_[ys, y]
            optimal_y = np.min(ys)

        return samples, ys
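Note: np.r_ and np.c_ are numpy indexing helpers, not callables; they take square brackets, and calling them with parentheses raises a TypeError. A minimal sketch of the row-wise stacking the loop above relies on (the shapes are illustrative assumptions):

    import numpy as np

    samples = np.zeros((10, 3))        # 10 samples, 3 hyperparameters each
    next_sample = np.ones((1, 3))      # proposed sample as a 1x3 row
    ys = np.zeros(10)                  # one objective value per sample

    samples = np.r_[samples, next_sample]  # row-wise stack -> shape (11, 3)
    ys = np.r_[ys, 0.5]                    # 1-D concatenation -> shape (11,)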
Example #2
 def yanchang(self, nummber, yushu):
     # extend x/y/z: tile each array nummber times, then append its
     # first yushu elements so the final length is nummber*len + yushu
     x2 = self.x[:yushu]
     y2 = self.y[:yushu]
     z2 = self.z[:yushu]
     self.x = np.tile(self.x, nummber)
     self.y = np.tile(self.y, nummber)
     self.z = np.tile(self.z, nummber)
     self.x = np.r_[self.x, x2]
     self.y = np.r_[self.y, y2]
     self.z = np.r_[self.z, z2]
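The tile-then-append pattern above, sketched on a small array:

    import numpy as np

    x = np.array([1, 2, 3])
    np.r_[np.tile(x, 2), x[:1]]   # -> array([1, 2, 3, 1, 2, 3, 1])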
def PyWavelet(img, level=1, wavlet="db1", mode="sym"):
  # convert the image to grayscale
  gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
  res_pywt = pywt.wavedec2(gray, wavlet, level=level, mode=mode)
  print(res_pywt)
  res_kinji, (res_x, res_y, res_xy) = res_pywt
  # merge: tile the approximation (res_kinji) and the three detail
  # coefficient arrays into a single image, approximation top-left
  tmp = np.r_[np.c_[res_kinji, res_x],
              np.c_[res_y, res_xy]]
  print("after merge")
  print(tmp)
  return tmp
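For reference, pywt.wavedec2 at level 1 returns (cA, (cH, cV, cD)), each coefficient array roughly half the input size per axis:

    import numpy as np
    import pywt

    img = np.zeros((8, 8))
    cA, (cH, cV, cD) = pywt.wavedec2(img, "db1", level=1, mode="sym")
    print(cA.shape)   # (4, 4)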
    def transform(self, X):
        """
        Transform a set of images.

        Returns the features from each layer.

        Parameters
        ----------
        X : array-like, shape = [n_images, height, width, color]
                        or
                        shape = [height, width, color]

        Returns
        -------
        T : array-like, shape = [n_images, n_features]

            If force_reshape = False,
            list of array-like, length output_layers,
                                each shape = [n_images, n_windows,
                                              n_window_features]

            Returns the features extracted for each of the n_images in X.
        """
        X = check_tensor(X, dtype=np.float32, n_dim=4)
        if self.batch_size is None:
            if self.force_reshape:
                return self.transform_function(X.transpose(
                        *self.transpose_order))[0].reshape((len(X), -1))
            else:
                return self.transform_function(
                    X.transpose(*self.transpose_order))
        else:
            XT = X.transpose(*self.transpose_order)
            n_samples = XT.shape[0]
            for i in range(0, n_samples, self.batch_size):
                transformed_batch = self.transform_function(
                    XT[i:i + self.batch_size])
                # at first iteration, initialize output arrays to correct size
                if i == 0:
                    shapes = [(n_samples,) + t.shape[1:] for t in
                              transformed_batch]
                    ravelled_shapes = [np.prod(shp[1:]) for shp in shapes]
                    if self.force_reshape:
                        output_width = np.sum(ravelled_shapes)
                        output = np.empty((n_samples, output_width),
                                          dtype=transformed_batch[0].dtype)
                        break_points = np.r_[0, np.cumsum(ravelled_shapes)]
                        raw_output = [
                            output[:, start:stop] for start, stop in
                            zip(break_points[:-1], break_points[1:])]
                    else:
                        output = [np.empty(shape,
                                           dtype=transformed_batch[0].dtype)
                                  for shape in shapes]
                        raw_output = [arr.reshape(n_samples, -1)
                                      for arr in output]

                for transformed, out in zip(transformed_batch, raw_output):
                    out[i:i + self.batch_size] = transformed
        return output
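How break_points carves the preallocated output into per-layer views, on assumed toy sizes:

    import numpy as np

    ravelled_shapes = [4, 6]                             # features per layer
    break_points = np.r_[0, np.cumsum(ravelled_shapes)]  # array([ 0,  4, 10])
    output = np.empty((3, 10))                           # n_samples x total width
    views = [output[:, start:stop]
             for start, stop in zip(break_points[:-1], break_points[1:])]
    print([v.shape for v in views])   # [(3, 4), (3, 6)]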
Example #5
    def KF_measurement(self, X_ins, P_k1_pr, r_i_gps, v_i_gps):
        # Kalman filter measurement update

        r_i = np.array([[X_ins[0, 0]], [X_ins[1, 0]], [X_ins[2, 0]]])
        v_i = np.array([[X_ins[3, 0]], [X_ins[4, 0]], [X_ins[5, 0]]])

        # compute the residuals
        d_r_i = r_i_gps - r_i
        d_v_i = v_i_gps - v_i
        y = np.r_[d_r_i, d_v_i]

        C = np.eye(6, 16)
        R = self.MeasureNoiCovMat()

        # compute the Kalman gain
        PC_T = np.dot(P_k1_pr, C.T)
        invCPC_R = np.linalg.inv(np.dot(C, PC_T) + R)
        KalG = np.dot(PC_T, invCPC_R)

        # estimate the error
        X_k1_pl = np.dot(KalG, y)  # simplified because the state estimate X-hat is essentially always zero

        # compute P
        P_k1_me = np.dot(np.eye(16) - np.dot(KalG, C), P_k1_pr)

        X_ins = X_ins + X_k1_pl
        return X_ins, KalG, P_k1_me
    def populate_gp_model(self,
                          observable,
                          lecs,
                          energy=None,
                          rescale=False,
                          fixvariance=0):
        """Creates a model based on given data and kernel.
        
        Args:
        observable - numpy array with observable. (1 row for each observable from each lec sample)
        lecs - numpy array with lec parameters fit should be done with regard to (lec 1 coloum 1 and so on, sample 1 on row 1 and so on)
        energy - energy values 
        """
        # Append the energies as an extra row of parameters for the fit
        # (see the shape sketch after this function)
        if energy is not None:
            lecs = np.r_[lecs, energy]
        if rescale:
            (lecs, observable) = self.rescale(lecs, observable)
        lecs = lecs.T
        observable = observable.T
        self.model = GPRegression(lecs, observable, self.kernel)

        self.model.Gaussian_noise.variance.unconstrain()

        self.model.Gaussian_noise.variance = fixvariance

        self.model.Gaussian_noise.variance.fix()
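A minimal shape sketch for the fit above, assuming a GPy-style GPRegression (rows are samples after the transpose, the observable is a column vector) and assuming lecs holds one parameter per row before the transpose, which is what the np.r_ row-append implies:

    import numpy as np

    n_samples, n_lecs = 20, 5
    lecs = np.random.rand(n_lecs, n_samples)   # one LEC parameter per row
    energy = np.random.rand(1, n_samples)      # energies as one extra row
    observable = np.random.rand(1, n_samples)

    X = np.r_[lecs, energy].T                  # -> (n_samples, n_lecs + 1)
    Y = observable.T                           # -> (n_samples, 1)
    # model = GPRegression(X, Y, kernel)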
 def featureConnecting(self, inputFeaturePath):
     print('inputFeaturePath = ' + inputFeaturePath)
     inputFileHandler = open(inputFeaturePath, 'rb')
     inputTensor = pickle.load(inputFileHandler)
     print('inputTensor.shape = ' + str(inputTensor.shape))
     if self.outputTensorExists:
         self.outputTensor = np.r_[self.outputTensor, inputTensor]
     else:
         self.outputTensor = inputTensor
         self.outputTensorExists = 1
     print('outputTensor.shape = ' + str(self.outputTensor.shape))
def observ_step(action, sentences, adjusted_sentence, timestep):
    reward = 0.0
    for o in range(len(action)):
        if action[o] == 1:
            word = sentences[o][timestep]
            reward = 1.0
        if action[o] == 0:
            word = [0] * input_size
            reward = 0.0
        adjusted_sentence[o] = np.r_[adjusted_sentence[o], [word]]  # append this step's feature row
    done = False
    with tf.variable_scope('lstm', reuse=tf.AUTO_REUSE):
        basic_cell = rnn.BasicLSTMCell(hidden_size)
        outputs, states = tf.nn.dynamic_rnn(basic_cell,
                                            adjusted_sentence,
                                            dtype=tf.float32)
    observ = outputs[:, -1, :]
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        obs = sess.run(observ)
    if timestep >= 27:
        done = True
    return obs, reward, done
Example #9
    def NoiseCovMat(self, X_ins, T):
        # compute the process-noise covariance matrix
        # the expansion of each component follows "Development of a Carrier-Phase DGPS/INS Integrated Navigation Algorithm". URL: https://repository.exst.jaxa.jp/dspace/handle/a-is/32573

        qbi = np.array([[X_ins[6, 0]], [X_ins[7, 0]], [X_ins[8, 0]],
                        [X_ins[9, 0]]])

        q1 = qbi[0, 0]
        q2 = qbi[1, 0]
        q3 = qbi[2, 0]
        q4 = qbi[3, 0]

        Q = 0.5 * np.array([[q4, -q3, q2], [q3, q4, -q1], [-q2, q1, q4],
                            [-q1, -q2, -q3]])

        Cbi = self.Quate_ToDCM(qbi)

        # approximations for the time integration (an exponential form would likely work just as well; the reference paper is from the 1990s)
        T1g = T * (1 - T / self.Tau_g + 2 / 3 * (T / self.Tau_g)**2)
        T1a = T * (1 - T / self.Tau_a + 2 / 3 * (T / self.Tau_a)**2)
        T2g = T**2 * (1 / 2 - 1 / 3 * T / self.Tau_g + 1 / 6 *
                      (T / self.Tau_g)**2)
        T2a = T**2 * (1 / 2 - 1 / 3 * T / self.Tau_a + 1 / 6 *
                      (T / self.Tau_a)**2)
        T3 = 1 / 3 * T**3

        I_33 = np.eye(3)
        Ze33 = np.zeros((3, 3))
        Ze34 = np.zeros((3, 4))
        Ze43 = np.zeros((4, 3))

        Q_22 = T3 * self.Wno_a * Cbi.T
        Q_25 = T2a * self.Wno_a * Cbi
        Q_33 = T3 * self.Wno_g * np.dot(Q, Q.T)
        Q_34 = T2g * self.Wno_g * Q
        Q_43 = T2g * self.Wno_g * Q.T
        Q_44 = T1g * self.Wno_g * I_33
        Q_52 = T2a * self.Wno_a * Cbi.T
        Q_55 = T1a * self.Wno_a * I_33

        Q_noise = np.r_[np.c_[Ze33, Ze33, Ze34, Ze33, Ze33],
                        np.c_[Ze33, Q_22, Ze34, Ze33, Q_25],
                        np.c_[Ze43, Ze43, Q_33, Q_34, Ze43],
                        np.c_[Ze33, Ze33, Q_43, Q_44, Ze33],
                        np.c_[Ze33, Q_52, Ze34, Ze33, Q_55]]

        return Q_noise
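The assembly above builds a block matrix: np.c_ joins blocks side by side into block rows, and np.r_ stacks those rows. A 2x2-block sketch:

    import numpy as np

    A = np.eye(2)
    Z = np.zeros((2, 2))
    M = np.r_[np.c_[A, Z],
              np.c_[Z, A]]   # 4x4 block-diagonal matrix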
Example #10
    def KF_System(self, a, w, X_ins):
        # compute the Kalman filter system matrix
        # F is the system matrix of the INS errors

        r_i = np.array([[X_ins[0, 0]], [X_ins[1, 0]], [X_ins[2, 0]]])
        v_i = np.array([[X_ins[3, 0]], [X_ins[4, 0]], [X_ins[5, 0]]])
        qbi = np.array([[X_ins[6, 0]], [X_ins[7, 0]], [X_ins[8, 0]],
                        [X_ins[9, 0]]])
        wBias = np.array([[X_ins[10, 0]], [X_ins[11, 0]], [X_ins[12, 0]]])
        aBias = np.array([[X_ins[13, 0]], [X_ins[14, 0]], [X_ins[15, 0]]])

        w_b = w - wBias
        a_b = a - aBias

        r = np.linalg.norm(r_i)
        x = r_i[0, 0]
        y = r_i[1, 0]
        z = r_i[2, 0]

        q1 = qbi[0, 0]
        q2 = qbi[1, 0]
        q3 = qbi[2, 0]
        q4 = qbi[3, 0]

        wx = w_b[0, 0]
        wy = w_b[1, 0]
        wz = w_b[2, 0]

        ax = a_b[0, 0]
        ay = a_b[1, 0]
        az = a_b[2, 0]

        I_33 = np.eye(3)
        Ze33 = np.zeros((3, 3))
        Ze34 = np.zeros((3, 4))
        Ze43 = np.zeros((4, 3))

        G = 3 * self.Mu / (r**5) * np.array(
            [[x * x, x * y, x * z], [x * y, y * y, y * z],
             [x * z, y * z, z * z]]) - self.Mu / (r**3) * I_33
        Ome = 0.5 * np.array([[0, wz, -wy, wx], [-wz, 0, wx, wy],
                              [wy, -wx, 0, wz], [-wx, -wy, -wz, 0]])
        Q = 0.5 * np.array([[q4, -q3, q2], [q3, q4, -q1], [-q2, q1, q4],
                            [-q1, -q2, -q3]])
        Rbar = np.c_[2 * Q, -qbi]
        Abar = np.array([[0, -az, ay], [az, 0, -ax], [-ay, ax, 0],
                         [-ax, -ay, -az]])
        Cbi = self.Quate_ToDCM(qbi)

        D = np.dot(2 * Cbi, np.dot(Abar.T, Rbar.T))

        Fbg = -1 / self.Tau_g * I_33
        Fba = -1 / self.Tau_a * I_33

        F = np.r_[np.c_[Ze33, I_33, Ze34, Ze33, Ze33],
                  np.c_[G, Ze33, D, Ze33, Cbi],
                  np.c_[Ze43, Ze43, Ome, Q, Ze43],
                  np.c_[Ze33, Ze33, Ze34, Fbg, Ze33],
                  np.c_[Ze33, Ze33, Ze34, Ze33, Fba]]

        return F
 def nonzip(self):
     print(np.r_[self.x_array, self.y_array, self.cl_array])
     print(np.c_[self.x_array, self.y_array, self.cl_array])
Example #13
                    print "Counter", counter


if __name__ == '__main__':

    carbons_init_list = [[2, 5], [1, 5]]
    # carbons_init_list = [[1,5]]
    pulses = 1

    DD_scheme = 'auto'
    el_after_init = 1
    optimize = True
    ssrocalib = True
    debug = True
    # FET = [0.015/(2.*pulses)]
    FET = np.r_[0.01, 0.5]
    wait_gate = True

    logic_state_list = ['mX', 'pX']
    # logic_state_list = ['mX']

    debug = False
    sleep = 1
    full_Tomo_basis_list = ([['X', 'X'], ['Y', 'Y'], ['Z', 'Z']])
    full_Tomo_basis_list = ([['X', 'X']])
    # full_Tomo_basis_list = ([['X','X']])

    # full_Tomo_basis_list = ([['DFS']])

    # el_RO_list = ['positive']
def visible(mesh: Mesh.Mesh, r: np.ndarray) -> np.ndarray:
    rotated_vertices = r.dot(
        np.hstack([mesh.vertices,
                   np.ones([len(mesh.vertices), 1])]).T).T[:, :3]
    z = rotated_vertices[:, 2]
    uv = rotated_vertices[:, :2]
    uv = uv - np.min(uv, axis=0)
    uv = uv + 1
    uv = uv / np.max(uv) * 1000
    width = 1000
    height = 1000
    faces = np.array(mesh.tvi)
    v1 = faces[:, 0]
    v2 = faces[:, 1]
    v3 = faces[:, 2]
    nfaces = np.shape(faces)[0]
    x = np.c_[uv[v1, 0], uv[v2, 0], uv[v3, 0]]
    y = np.c_[uv[v1, 1], uv[v2, 1], uv[v3, 1]]
    minx = np.ceil(x.min(1))
    maxx = np.floor(x.max(1))
    miny = np.ceil(y.min(1))
    maxy = np.floor(y.max(1))

    del x, y

    minx = np.clip(minx, 0, width - 1).astype(int)
    maxx = np.clip(maxx, 0, width - 1).astype(int)
    miny = np.clip(miny, 0, height - 1).astype(int)
    maxy = np.clip(maxy, 0, height - 1).astype(int)

    [rows, cols] = np.meshgrid(np.arange(width), np.arange(height))
    zbuffer = np.full((height, width), -np.inf)
    fbuffer = np.zeros((height, width), dtype=int)

    for i in range(nfaces):
        if minx[i] <= maxx[i] and miny[i] <= maxy[i]:
            px = rows[miny[i]:maxy[i], minx[i]:maxx[i]]
            py = cols[miny[i]:maxy[i], minx[i]:maxx[i]]
            px = px.ravel()
            py = py.ravel()

            e0 = uv[v1[i], :]
            e1 = uv[v2[i], :] - e0   # edge vectors relative to the first vertex
            e2 = uv[v3[i], :] - e0

            det = e1[0] * e2[1] - e1[1] * e2[0]
            tmpx = px - e0[0]
            tmpy = py - e0[1]
            a = (tmpx * e2[1] - tmpy * e2[0]) / det
            b = (tmpy * e1[0] - tmpx * e1[1]) / det

            test = (a >= 0) & (b >= 0) & (a + b <= 1)

            if np.any(test):
                px = px[test]
                py = py[test]

                w2 = a[test]
                w3 = b[test]
                w1 = 1 - w3 - w2
                pz = z[v1[i]] * w1 + z[v2[i]] * w2 + z[v3[i]] * w3

                update = pz > zbuffer[py, px]
                zbuffer[py[update], px[update]] = pz[update]
                fbuffer[py[update], px[update]] = i
    test = fbuffer != 0
    f = np.unique(fbuffer[test])
    v = np.unique(np.r_[v1[f], v2[f], v3[f]])
    return v
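The per-face test above uses barycentric coordinates; the same arithmetic on a single triangle and query point:

    import numpy as np

    p0, p1, p2 = np.array([0., 0.]), np.array([4., 0.]), np.array([0., 4.])
    e1, e2 = p1 - p0, p2 - p0                    # edge vectors
    det = e1[0] * e2[1] - e1[1] * e2[0]
    tmpx, tmpy = 1.0 - p0[0], 1.0 - p0[1]        # query point (1, 1)
    a = (tmpx * e2[1] - tmpy * e2[0]) / det      # 0.25
    b = (tmpy * e1[0] - tmpx * e1[1]) / det      # 0.25
    print((a >= 0) & (b >= 0) & (a + b <= 1))    # True: the point is inside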
Example #15
def sequential_stream_track(q_len,
                            traces,
                            access_type,
                            seq_size_threshold=None):
    '''
 [n_seq_cmd, n_seq_stream, n_read_cmd, n_total_cmd, size_dist, seq_stream_length_count, seq_stream_length_count_limited, ex_record] = sequential_stream_track(q_len, traces, access_type, seq_size_threshold)
 calculate the near-sequential stream stack (a usage sketch follows this function)

 input:
   q_len: designed queue length
   traces: nx3 matrix of IO events (start_lba, size, access mode)
   access_type: consider only writes (0), only reads (1), or both (2)
   seq_size_threshold: the threshold for counting a stream as sequential, i.e.,
   if the stream request size >= seq_size_threshold, the stream is counted as
   a sequential stream
 output:
   n_seq_cmd: number of sequential commands
   n_seq_stream: number of sequential streams
   n_read_cmd: number of read commands
   n_total_cmd: number of total commands
   size_dist: request size distribution for sequential commands
   seq_stream_length_count: column 1 at index i holds the number/frequency of commands with sequence command length = i; column 2 holds the total request size at that index; column 2 / column 1 gives the average request size
   seq_stream_length_count_limited: same as above but constrained by seq_size_threshold; a stream is counted only if its request size >= seq_size_threshold, so the counts are at most those above
   ex_record: record of the constrained command and stream numbers

 Author: [email protected]
    '''
    # access_type: decide if only consider read 1/write 0 or combine 2

    total_cmd, b = shape(traces)
    queue_index = 0

    print('Sequence analysis: Queue length =' + str(q_len) +
          ' and access type =' + str(access_type))

    LRU_queue = -ones((q_len, 4), dtype=uint64)

    seq_cmd_count = 0
    seq_stream_count = 0
    max_stream_length = 1024
    seq_stream_length_count = zeros((max_stream_length, 2), dtype=uint32)
    seq_stream_length_count_limited = zeros((max_stream_length, 2),
                                            dtype=uint32)

    max_size = max(traces[:, 1])
    size_dist = zeros((max_size, 1))

    idx_read = nonzero(traces[:, 2] == 1)
    read_cmd_count = shape(idx_read)[1]

    ex_record = ex_record_class(0, 0)
    #    ex_record.cmd_number = (0)
    #    ex_record.stream_number = (0)

    for cmd_id in arange(0, total_cmd).reshape(-1):
        #Get the trace information
        start_lba = traces[cmd_id, 0]
        end_lba = traces[cmd_id, 0] + traces[cmd_id, 1] - 1
        access_mode = traces[cmd_id, 2]
        #Here, only read or write?
        if (access_type == 0):
            if access_mode == 1:
                continue
        else:
            if access_type == 1:
                if access_mode == 0:
                    continue
        #We scan through the LRU queue to see whether this command can be connected
        #to a sequential stream
        find_sequential = 0
        for q_i in arange(0, queue_index).reshape(-1):
            if start_lba == LRU_queue[q_i, 1] + 1:
                if LRU_queue[q_i, 2] == 1:
                    seq_cmd_count = seq_cmd_count + 1
                    LRU_queue[q_i, 3] = LRU_queue[q_i, 3] + 1
                    size_dist[traces[cmd_id, 1] -
                              1] = size_dist[traces[cmd_id, 1] - 1] + 1
                else:
                    LRU_queue[q_i, 2] = 1
                    LRU_queue[q_i, 3] = 2
                    seq_stream_count = seq_stream_count + 1
                    seq_cmd_count = seq_cmd_count + 2
                    first_cmd_length = int(LRU_queue[q_i, 1] -
                                           LRU_queue[q_i, 0] + 1)
                    size_dist[first_cmd_length -
                              1] = size_dist[first_cmd_length - 1] + 1
                    size_dist[traces[cmd_id, 1] -
                              1] = size_dist[traces[cmd_id, 1] - 1] + 1
                find_sequential = 1
                LRU_queue[q_i, 1] = end_lba
                break
        #######################################################################
        # if there is no sequential stream for attaching.
        if logical_not(find_sequential):
            queue_index = queue_index + 1
            if queue_index > q_len:
                queue_index = (q_len)
                if LRU_queue[0, 3] > max_stream_length:
                    seq_stream_length_count = (r_[seq_stream_length_count,
                                                  zeros((LRU_queue[0, 3] -
                                                         max_stream_length, 2),
                                                        dtype=uint32)])
                    seq_stream_length_count_limited = (
                        r_[seq_stream_length_count_limited,
                           zeros((LRU_queue[0, 3] - max_stream_length, 2),
                                 dtype=uint32)])
                    max_stream_length = LRU_queue[0, 3]
                #             # used to record the exceptation over 1024
            #             if LRU_queue(1,4)>1024
            #                 ex_record.number=ex_record.number+1;
            #             end
                idx = int(LRU_queue[0, 3])

                if idx > 0:
                    #print(idx)
                    seq_stream_length_count[
                        idx - 1, 0] = seq_stream_length_count[idx - 1, 0] + 1
                    seq_stream_length_count[
                        idx - 1, 1] = seq_stream_length_count[
                            idx - 1, 1] + LRU_queue[0, 1] - LRU_queue[0, 0] + 1
#                    seq_stream_length_count[idx,0]=seq_stream_length_count[idx,0] + 1
#                    seq_stream_length_count[idx,1]=seq_stream_length_count[idx,1] + LRU_queue[0,1] - LRU_queue[0,0] + 1

                if (idx > 0) and (LRU_queue[0, 1] - LRU_queue[0, 0] + 1 >=
                                  seq_size_threshold):
                    seq_stream_length_count_limited[
                        idx - 1,
                        0] = seq_stream_length_count_limited[idx - 1, 0] + 1
                    seq_stream_length_count_limited[
                        idx - 1, 1] = seq_stream_length_count_limited[
                            idx - 1, 1] + LRU_queue[0, 1] - LRU_queue[0, 0] + 1
                    #                    seq_stream_length_count_limited[idx,0]=seq_stream_length_count_limited[idx,0] + 1
                    #                    seq_stream_length_count_limited[idx,1]=seq_stream_length_count_limited[idx,1] + LRU_queue[0,1] - LRU_queue[0,0] + 1
                    ex_record.cmd_number = (ex_record.cmd_number + idx)

                LRU_queue[0:queue_index - 1, :] = LRU_queue[1:queue_index, :]
            LRU_queue[queue_index - 1, 0] = start_lba
            LRU_queue[queue_index - 1, 1] = end_lba
            LRU_queue[queue_index - 1, 2] = 0
            LRU_queue[queue_index - 1, 3] = 0

    for j in arange(0, q_len).reshape(-1):
        if LRU_queue[j, 3] > max_stream_length:
            seq_stream_length_count = r_[
                seq_stream_length_count,
                zeros((LRU_queue[j, 3] - max_stream_length, 2))]
            seq_stream_length_count_limited = (
                r_[seq_stream_length_count_limited,
                   zeros((LRU_queue[j, 3] - max_stream_length, 2))])
            max_stream_length = LRU_queue[j, 3]

        ## need to debug the following lines
    #     if LRU_queue(j,4)>0
    #         seq_stream_length_count(LRU_queue(j,4),1)=seq_stream_length_count(LRU_queue(j,4),1)+1;
    #         seq_stream_length_count(LRU_queue(j,4),2)=seq_stream_length_count(LRU_queue(j,4),2)+1+LRU_queue(q_i, 2)-LRU_queue(q_i, 1);
    #     end
    #     if (LRU_queue(q_i,4)>0) && (LRU_queue(q_i, 2)-LRU_queue(q_i, 1)+1>=seq_size_threshold)
    #         seq_stream_length_count_limited(LRU_queue(q_i,4),1)=seq_stream_length_count_limited(LRU_queue(q_i,4),1)+1;
    #         seq_stream_length_count_limited(LRU_queue(q_i,4),2)=seq_stream_length_count_limited(LRU_queue(q_i,4),2)+1+LRU_queue(q_i, 2)-LRU_queue(q_i, 1);
    #         ex_record.cmd_number=ex_record.cmd_number+LRU_queue(q_i,4);
    #     end

    n_seq_cmd = copy(seq_cmd_count)
    n_seq_stream = copy(seq_stream_count)
    n_read_cmd = copy(read_cmd_count)
    n_total_cmd = copy(total_cmd)
    ex_record.stream_number = (sum(seq_stream_length_count_limited[:, 0]))

    return n_seq_cmd, n_seq_stream, n_read_cmd, n_total_cmd, size_dist, seq_stream_length_count, seq_stream_length_count_limited, ex_record
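A minimal usage sketch on a synthetic trace (assumes the function and numpy's namespace are imported; columns are start_lba, size, access mode):

    from numpy import array

    traces = array([[0, 8, 1],      # read 8 blocks at LBA 0
                    [8, 8, 1],      # sequential continuation
                    [100, 4, 1]])   # unrelated read
    stats = sequential_stream_track(q_len=4, traces=traces,
                                    access_type=1, seq_size_threshold=8)
    n_seq_cmd, n_seq_stream = stats[0], stats[1]   # 2 and 1 here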
Example #16
    # map each condition level to the indices of its replicates
    levels = {}
    for l in np.unique(conditions):
        levels[l] = np.where(conditions == l)[0]
        if len(levels[l]) < 2:
            raise ValueError("condition level '" + str(l) +
                             "' has less than two replicates")

    # end sanity check
    if verbose:
        print("sanity check complete")

    # Summarize the relative abundance (rab) within and across all groups

    rab = {"all": None, "win": {}, "spl": None}

    # this is the median value across all monte carlo replicates
    cl2p = np.column_stack(list(getMonteCarloInstances(clr)))
    rab["all"] = np.median(cl2p, axis=1)
    del cl2p

    if verbose:
        print("rab.all complete")

    # this is the median value across all monte carlo replicates per level
Example #17
def multilayer_extraction(edgelist, seed, min_score, prop_sample):
    m = max(list(edgelist['layer']))
    n = len(np.unique(edgelist['node1'].append(edgelist['node2'])))

    # Calculate the Modularity Matrix
    # Use Expectation_CM
    print("Estimation Stage")
    mod_mat = expCM.expectation_CM(edgelist)

    # Initialize the Communities
    print("Initialization Stage")
    initial_set = None
    for i in range(0, m + 1):
        graph = nx.parse_edgelist(
            format_edgelist.format_edgelist(edgelist[edgelist['layer'] == i]))
        if i == 0:
            initial_set = init.initialization(graph, prop_sample, m, n)
        else:
            initial_set.append(init.initialization(graph, prop_sample, m, n))

    # Search Across Initial Sets
    print("Search Stage")

    print("Seaching over",
          len(initial_set['vertex_set']),
          "seed sets \n",
          sep=" ")
    results_temp = pd.DataFrame()
    k = len(initial_set['vertex_set'])

    # Detect number of cores present on your machine
    cores = mp.cpu_count()
    for i in range(0, k):
        starter = pd.DataFrame()
        starter['vertex_set'] = initial_set["vertex_set"][i]
        if len(starter["vertex_set"] < 2):
            starter["vertex_set"] = np.r_(
                starter["vertex_set"],
                list(set(range(1, n)) - set(starter["vertex_set"]))[i])
        starter["layer_set"] = initial_set["layer_set"][i]
        temp = single_swap(starter, mod_mat, m, n)
        results_temp = pd.concat([results_temp, temp])

    print("Cleaning Stage")
    if len(results_temp) < 1:
        return "No Community Found"

    scores = np.repeat(0, len(results_temp))

    for i in range(1, len(results_temp)):
        if len(results_temp["layer_set"][i]) == 0:
            scores[i] = -1000
        if len(results_temp["layer_set"][i]) > 0:
            scores[i] = results_temp[i]

    scores = np.round(scores, 5)
    indx = np.where(np.unique(scores))
    indx2 = np.where(scores > min_score)
    results2 = results_temp[list(set(indx).intersection(indx2))]

    betas = list()
    mean_score = list()
    number_communities = list()
    results3 = list()
    if len(results2) <= 0:
        results = None
        return None
    elif len(results2) > 0:
        betas = [1] * 100
        number_communities = [0] * len(betas)
        mean_score = [0] * len(betas)
        for i in range(0, len(betas)):
            temp = None  # Cleanup
            results3[i] = None  # List of betas and communities
            mean_score[i] = None  # mean score of temp list
            number_communities[i] = None  # length of temp communities

    z = pd.DataFrame({
        "Betas": betas,
        "Mean Score": mean_score,
        "Number Communities": number_communities
    })
    multi = pd.DataFrame({"Community List": results3, "Diagnostics": z})

    return multi
Example #18
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import ConvexHull

points = np.random.rand(30, 2)
hull = ConvexHull(points)
plt.plot(points[:, 0], points[:, 1], 'o')
for simplex in hull.simplices:
    plt.plot(points[simplex, 0], points[simplex, 1], 'k-')
plt.show()

plt.scatter(points[:, 0], points[:, 1])

data = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
data.reset_index()
data.sort_values("a")

a1 = [1, 2, 3]
a2 = [4, 5, 6]
np.r_[a1, a2]              # -> array([1, 2, 3, 4, 5, 6])
np.concatenate([a1, a2])   # -> array([1, 2, 3, 4, 5, 6])
np.append(a1, a2)          # -> array([1, 2, 3, 4, 5, 6])
np.stack([a1, a2])         # -> array([[1, 2, 3], [4, 5, 6]])
np.hstack([a1, a2])        # -> array([1, 2, 3, 4, 5, 6])
pd.cut(data["a"], bins=2)  # bin the values of column "a" into 2 intervals
Example #19
import numpy as np
import skfuzzy as fuzz

# taking fuzzy sets A and B as user inputs

print("Enter elements of A")
a_x = input()
a_x = np.r_(a_x)
# print(a_x)

print("Enter memership values of corresponding elements of A")
a_mx = input()
a_mx = np.r_(a_mx)
# print(a_mx)

print("Enter elements of B")
b_x = input()
b_x = np.r_(b_x)
# print(b_x)

print("Enter memership values of corresponding elements of B")
b_mx = input()
b_mx = np.r_(b_mx)
# print(b_mx)

# LHS wrong
m, mfm = fuzz.fuzzymath.fuzzy_or(a_x, a_mx, b_x, b_mx)
print("Union:")
print(m)
print(mfm)