Code Example #1
File: pure_monte_carlo.py Project: roryhr/sandbox
def print_board(board: np.ndarray):
    """Print out the board representation in sexy ASCII...
    
    print_board(np.array([[1,0,-1], [0,0,0], [1,-1,0]]))
    
    X |   | O 
    -----------
      |   | 
    -----------
    X | O | 
    """

    li_board = board.tolist()
    print('')
    for i, row in enumerate(li_board):
        t = str(row)
        t = t.replace('[', ' ')
        t = t.replace(']', '')
        t = t.replace(',', ' |')
        t = t.replace('-1', 'O')
        t = t.replace('1', 'X')
        t = t.replace('0', ' ')

        print(t)
        if i < 2:
            print('-----------')
Code Example #2
def c_int_ext(k: int, aff: np.ndarray, adj_mat: np.ndarray, threads_nb=1) -> tuple:
    """
    Calculates the intra-/inter-cluster densities
    as defined in Santo Fortunato, Community Detection in Graphs, Physics Reports, 486, 75-174 (2010).
    Parameters
    ----------
    k : int
        The number of clusters
    aff : np.ndarray
        A 1-D array containing the assignment of each node to its cluster
    adj_mat : np.ndarray
        Adjacency matrix
    threads_nb : int, optional
        Number of worker threads used to count the edges (default 1)
    Returns
    -------
    float, float
        The average internal density sum(sigma_int) / k and the average
        external density sum(sigma_ext) / k, used as a clustering quality measure.
    """
    global int_sigmas
    global ext_sigmas
    # initialize to zeros
    int_sigmas = np.zeros(k)
    ext_sigmas = np.zeros(k)
    # Get the number of nodes
    n = len(aff)
    # Calculates the internal and external edges
    # for each cluster
    threads = []  # type: list[Thread]
    # if threads number is too large then update it
    if n / 10 < threads_nb:
        threads_nb = int(n / 10)
    # create threads instances
    for i in range(threads_nb):
        from_i = int(i * (n / threads_nb))
        to_i = int((i + 1) * (n / threads_nb))
        t = Thread(target=calculate_int_ext_edges, args=(adj_mat, aff, from_i, to_i))
        threads.append(t)
        threads[i].start()
    # Wait for threads to finish
    for t in threads:
        t.join()
    # Transform aff from np.ndarray to list
    # to be able to use the count function
    aff = aff.tolist() # type: list
    # Calculates the density for each cluster
    for i in range(k):
        nb_c_i = aff.count(i)
        if nb_c_i <= 1:
            int_sigmas[i] = 0
            ext_sigmas[i] = 0
        else:
            int_sigmas[i] /= (nb_c_i * (nb_c_i - 1) / 2)
            ext_sigmas[i] /= (nb_c_i * (n - nb_c_i))
    # Return the density for all the clusters
    return sum(int_sigmas) / k, sum(ext_sigmas) / k
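Note: calculate_int_ext_edges is not shown in the snippet above, so the following single-threaded sketch of the same per-cluster counting is an assumption about the edge-counting convention rather than the project's code; it may be useful for sanity-checking c_int_ext on small graphs.

import numpy as np

def c_int_ext_reference(k: int, aff: np.ndarray, adj_mat: np.ndarray) -> tuple:
    """Single-threaded sketch of the intra-/inter-cluster densities (hypothetical helper)."""
    n = len(aff)
    int_sigmas = np.zeros(k)
    ext_sigmas = np.zeros(k)
    # Count every undirected edge exactly once (i < j)
    for i in range(n):
        for j in range(i + 1, n):
            if adj_mat[i, j]:
                if aff[i] == aff[j]:
                    int_sigmas[aff[i]] += 1      # edge internal to cluster aff[i]
                else:
                    ext_sigmas[aff[i]] += 1      # edge leaving cluster aff[i]
                    ext_sigmas[aff[j]] += 1      # ... and entering cluster aff[j]
    for c in range(k):
        n_c = int(np.sum(np.asarray(aff) == c))
        if n_c > 1:
            int_sigmas[c] /= n_c * (n_c - 1) / 2     # max possible internal edges
            ext_sigmas[c] /= n_c * (n - n_c)         # max possible external edges
        else:
            int_sigmas[c] = ext_sigmas[c] = 0.0
    return sum(int_sigmas) / k, sum(ext_sigmas) / k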
Code Example #3
    def get_summary_stats(self, locinames: np.ndarray, num_ids: int,
                          max_MOI: int):
        '''
        Returns dataframes containing the summary statistics of the algorithm
        run
        :return: A tuple of (posterior_df, summary_stats_df)
        '''
        num_loci = locinames.size
        # TODO: What does this mean?
        # Find the mode of the hidden alleles on the first/last day
        modealleles = np.zeros((2 * num_ids, max_MOI * num_loci))
        for i in range(num_ids):
            for j in range(num_loci):
                modealleles[2 * i,
                            j * max_MOI:(j + 1) * max_MOI] = sp_stats.mode(
                                self.alleles0[i, j * max_MOI:(j + 1) *
                                              max_MOI, :],
                                axis=1)[0].ravel()

                modealleles[2 * i + 1,
                            j * max_MOI:(j + 1) * max_MOI] = sp_stats.mode(
                                self.allelesf[i, j * max_MOI:(j + 1) *
                                              max_MOI, :],
                                axis=1)[0].ravel()

        # TODO: Combined what? Why is this doubled?
        temp_combined = np.repeat(
            np.mean(self.classification, axis=1)[:num_ids], 2)
        # Reshape into a single column
        temp_combined = temp_combined.reshape(2 * num_ids, 1)
        posterior_matrix = np.concatenate((temp_combined, modealleles), axis=1)
        posterior_matrix_columns = [[f"{locus}_{i+1}" for i in range(max_MOI)]
                                    for locus in locinames]
        posterior_matrix_columns = np.array(
            posterior_matrix_columns).flatten().tolist()
        posterior_matrix_columns.insert(0, "Prob Rec")
        posterior_df = pd.DataFrame(posterior_matrix,
                                    columns=posterior_matrix_columns)

        # TODO: Understand what's being printed here? Separate out into its own
        # function?
        summary_statisticsmatrix = np.concatenate(
            (np.mean(self.parameters, axis=1).reshape(
                -1, 1), np.quantile(self.parameters, (0.25, 0.75), axis=1).T),
            axis=1)
        summary_statisticsmatrix = np.concatenate(
            (summary_statisticsmatrix,
             np.append(
                 np.quantile(self.parameters[2 + num_loci:, :], (0.25, 0.75)),
                 np.mean(self.parameters[2 + num_loci:, :]),
             ).reshape(1, -1)))
        summary_statisticsmatrix = np.array([
            f"{summary_statisticsmatrix[i,0]:.3f} ({summary_statisticsmatrix[i,1]:.3f}, {summary_statisticsmatrix[i,2]:.3f})"
            for i in range(summary_statisticsmatrix.shape[0])
        ])
        summary_statistics_df = pd.DataFrame(summary_statisticsmatrix,
                                             index=[
                                                 "q", "d", *locinames.tolist(),
                                                 *locinames.tolist(),
                                                 "Mean diversity"
                                             ])

        return posterior_df, summary_statistics_df
Code Example #4
File: util.py Project: maxsolomonhenry/amp_mod
def np2matlab(input_: np.ndarray):
    """
    Convert numpy array to Matlab double.
    """
    return matlab.double(input_.tolist())[0]
Code Example #5
File: models.py Project: nickvandewiele/lending_club
 def _post_process(self, prediction: np.ndarray) -> LoanPredictionResult:
     logger.debug("Post-processing prediction.")
     result = prediction.tolist()[0]
     lpp = LoanPredictionResult(result=result)
     return lpp
Code Example #6
File: AI_expectimax.py Project: GMouYes/AI_Proj
 def add_state(self, state: np.ndarray):
     if hash(str(state.tolist())) not in self.states:
         new_state = StateNode(state, self)
         new_state.depth = self.depth + 1
         self.states.update({new_state.__hash__(): new_state})
Code Example #7
def _numpy_array_repr(arr: np.ndarray) -> str:
    return 'np.array({!r})'.format(arr.tolist())
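For illustration only (not part of the original snippet), the helper simply wraps the list form of the array in an np.array(...) literal:

import numpy as np
print(_numpy_array_repr(np.array([[1, 2], [3, 4]])))   # np.array([[1, 2], [3, 4]])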
Code Example #8
File: keypoint.py Project: cm107/common_utils
 def from_numpy(cls, arr: np.ndarray) -> Keypoint2D:
     return cls.from_list(arr.tolist())
Code Example #9
def dump_array(array: np.ndarray, fn='dump.txt'):
    with open(fn, 'w', encoding='utf8') as f:
        json.dump(array.tolist(), f)
Code Example #10
File: mnist.py Project: referenceai/lib
 def inverse_transform_one(self, labels : np.ndarray, t : Transform) -> int:
     labels = labels[0]
     idx = labels.tolist().index(max(labels))
     labels = [0]*len(labels)
     labels[idx] = 1
     return int(t.encoder.inverse_transform([labels])[0][0])
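A usage sketch under stated assumptions: the Transform class is not shown, so a fitted sklearn LabelBinarizer stands in for t.encoder here (hypothetical; the project's encoder may be indexed differently). The snippet reproduces the argmax-to-one-hot step the method performs before inverse-transforming:

import numpy as np
from sklearn.preprocessing import LabelBinarizer

encoder = LabelBinarizer().fit([0, 1, 2, 3])       # hypothetical stand-in for t.encoder
scores = np.array([[0.1, 0.2, 0.6, 0.1]])          # one row of class scores
row = scores[0]
idx = row.tolist().index(max(row))                 # index of the highest score
one_hot = [0] * len(row)
one_hot[idx] = 1
print(int(encoder.inverse_transform(np.array([one_hot]))[0]))   # 2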
Code Example #11
def array_to_list(array: np.ndarray) -> List:
    """Returns an array as list"""
    return array.tolist()
Code Example #12
File: dataset.py Project: geffy/ebonite
 def serialize(self, instance: np.ndarray):
     # if self.shape == 1:
     #     return [instance.tolist()]  # TODO better shapes
     return instance.tolist()
Code Example #13
 def _get_prediction_dict(array: np.ndarray) -> dict:
     return dict(instances=array.tolist(), signature_name='serving_default')
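The returned dict matches the request body of the TensorFlow Serving REST predict API; a hedged usage sketch (the host, port and model name below are placeholders, and the helper appears to be a staticmethod, shown here as directly callable):

import json
import numpy as np
import requests

batch = np.random.rand(2, 4).astype(np.float32)
payload = _get_prediction_dict(batch)
# POST to a hypothetical TensorFlow Serving endpoint
resp = requests.post("http://localhost:8501/v1/models/my_model:predict",
                     data=json.dumps(payload))
print(resp.json()["predictions"])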
Code Example #14
File: graph.py Project: hyzcn/tf_G
  def __init__(self, sess: tf.Session, name: str,
               writer: tf.summary.FileWriter = None,
               edges_np: np.ndarray = None, n: int = None,
               is_sparse: bool = False) -> None:
    """ Class Constructor of the Graph

    This method is called to construct a Graph object. This block of code
    initializes all the variables necessary for this class to work properly.

    This class can be initialized from an edge list, which fills the graph
    immediately, or constructed from the cardinality of the vertex set given
    by the `n` parameter.

    Args:
      sess (:obj:`tf.Session`): This attribute represents the session that runs
        the TensorFlow operations.
      name (str): This attribute represents the name of the object in
        TensorFlow's op Graph.
      writer (:obj:`tf.summary.FileWriter`, optional): This attribute represents
        a TensorFlow's Writer, that is used to obtain stats. The default value
        is `None`.
      edges_np (:obj:`np.ndarray`, optional): The edge set of the graph, encoded
        so that `edges_np[:,0]` contains the sources and `edges_np[:,1]` the
        destinations of the edges. The default value is `None`.
      n (int, optional): Represents the cardinality of the vertex set. The
        default value is `None`.
      is_sparse (bool, optional): Use sparse Tensors if set to `True`. The
        default value is `False`. Not implemented yet; see the Todo for more
        information.

    Todo:
      * Implement variables as sparse when it's possible. Waiting to
        TensorFlow for it.

    """
    TensorFlowObject.__init__(self, sess, name, writer, is_sparse)
    UpdateEdgeNotifier.__init__(self)

    if edges_np is not None:
      if n is not None:
        self.n = max(n, int(edges_np.max(axis=0).max() + 1))
      else:
        self.n = int(edges_np.max(axis=0).max() + 1)
      self.m = int(edges_np.shape[0])
      A_init = tf.scatter_nd(edges_np.tolist(), self.m * [1.0],
                             [self.n, self.n])
    elif n is not None:
      self.n = n
      self.m = 0
      A_init = tf.zeros([self.n, self.n])
    else:
      raise ValueError('Graph constructor must have edges or n')

    self.n_tf = tf.Variable(float(self.n), tf.float32,
                            name=self.name + "_n")
    self.A_tf = tf.Variable(A_init, tf.float64,
                            name=self.name + "_A")
    self.out_degrees_tf = tf.Variable(
      tf.reduce_sum(self.A_tf, 1, keep_dims=True),
      name=self.name + "_d_out")
    self.in_degrees_tf = tf.Variable(
      tf.reduce_sum(self.A_tf, 0, keep_dims=True),
      name=self.name + "_d_in")
    self.run_tf(tf.variables_initializer([self.A_tf, self.n_tf]))
    self.run_tf(tf.variables_initializer([
      self.out_degrees_tf, self.in_degrees_tf]))
Code Example #15
def _(X: np.ndarray, discount: float) -> torch.Tensor:
    output = discounted_sum(X.tolist(), discount=discount)
    return np.array(output, dtype=np.float32)
Code Example #16
File: redis.py Project: tmigimatsu/ctrl-utils
def encode_matlab(A: np.ndarray) -> str:
    if len(A.shape) == 1:
        return " ".join(map(str, A.tolist()))
    return "; ".join(" ".join(map(str, row)) for row in A.tolist())
Code Example #17
 def from_np(cls, input_bytes: np.ndarray) -> "MathyEnvState":
     """Convert a numpy object into a state object"""
     input_string = "".join([chr(o) for o in input_bytes.tolist()])
     state = cls.from_string(input_string)
     return state
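For context, a small sketch of the inverse direction (illustrative only; the example string is hypothetical and the real serialized format is defined by MathyEnvState):

import numpy as np
text = "hypothetical serialized state"
encoded = np.array([ord(c) for c in text])            # the kind of array from_np expects
decoded = "".join(chr(o) for o in encoded.tolist())   # recovers the original string
assert decoded == text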
Code Example #18
def get_debiasing_projection(classifier_class, cls_params: Dict, num_classifiers: int, input_dim: int,
                             is_autoregressive: bool,
                             min_accuracy: float, X_train: np.ndarray, Y_train: np.ndarray, X_dev: np.ndarray,
                             Y_dev: np.ndarray, noise=False, random_subset=1., by_class=True, Y_train_main=None,
                             Y_dev_main=None) -> np.ndarray:
    """
    :param classifier_class:
    :param num_classifiers:
    :param input_dim:
    :param is_autoregressive:
    :param min_accuracy:
    :param X_train:
    :param Y_train:
    :param X_dev:
    :param Y_dev:
    :return: the debiasing projection
    """

    if by_class and ((Y_train_main is None) or (Y_dev_main is None)): raise Exception()

    P = np.eye(input_dim)
    X_train_cp = X_train.copy()
    X_dev_cp = X_dev.copy()
    labels_set = list(set(Y_train.tolist()))
    main_task_labels = list(set(Y_train_main.tolist()))

    if noise:
        print("Adding noise.")
        mean = np.mean(np.abs(X_train))
        mask_train = 0.0075 * (np.random.rand(*X_train.shape) - 0.5)

        X_train_cp += mask_train

    pbar = tqdm(range(num_classifiers))
    for i in pbar:

        x_t, y_t = X_train_cp.copy(), Y_train.copy()

        clf = classifier.SKlearnClassifier(classifier_class(**cls_params))

        idx = np.random.rand(x_t.shape[0]) < random_subset
        x_t = x_t[idx]
        y_t = y_t[idx]

        #if by_class:
        #    cls = np.random.choice(Y_train_main)  # random.choice(main_task_labels) UNCOMMENT FOR EQUAL CHANCE FOR ALL Y
        #    relevant_idx_train = Y_train_main == cls
        #    relevant_idx_dev = Y_dev_main == cls
        #else:
        #    relevant_idx_train = np.ones(x_t.shape[0], dtype=bool)
        #    relevant_idx_dev = np.ones(X_dev_cp.shape[0], dtype=bool)

        acc = clf.train_network(x_t, y_t, X_dev_cp, Y_dev)
        pbar.set_description("iteration: {}, accuracy: {}".format(i, acc))
        if acc < min_accuracy: continue

        W = clf.get_weights()
        P_i = get_nullspace_projection(W)
        P = P.dot(P_i)

        if is_autoregressive:
            X_train_cp = X_train_cp.dot(P_i)
            X_dev_cp = X_dev_cp.dot(P_i)

    return P
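The helpers classifier.SKlearnClassifier and get_nullspace_projection are referenced but not defined above. As a hedged sketch, one standard way to build the nullspace projector (assuming W holds the trained classifier weights with shape (n_classes, input_dim)) is:

import numpy as np
from scipy.linalg import null_space

def get_nullspace_projection(W: np.ndarray) -> np.ndarray:
    """Orthogonal projection onto the nullspace of W (a sketch, not necessarily the project's code)."""
    basis = null_space(W)        # orthonormal columns spanning {x : W @ x = 0}
    return basis @ basis.T       # idempotent projector: P_i @ P_i == P_i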
Code Example #19
 def _get_vector_similarity_query(self, query_emb: np.ndarray, top_k: int):
     """
     Generate Elasticsearch query for vector similarity.
     """
     query = {"knn": {self.embedding_field: {"vector": query_emb.tolist(), "k": top_k}}}
     return query
Code Example #20
File: hyq_logger.py Project: cracyling/gym_robo
def adapt_np_array(arr: np.ndarray):
    return json.dumps(arr.tolist())
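Judging by the sqlite3 adapter naming convention, this helper looks like it is meant to be registered so arrays can be bound directly as JSON text. A hedged sketch of that usage (an assumption; the registration is not shown in the project):

import json
import sqlite3
import numpy as np

def adapt_np_array(arr: np.ndarray):          # repeated here so the sketch is self-contained
    return json.dumps(arr.tolist())

sqlite3.register_adapter(np.ndarray, adapt_np_array)
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE log (data TEXT)")
conn.execute("INSERT INTO log VALUES (?)", (np.arange(3),))      # stored as "[0, 1, 2]"
print(conn.execute("SELECT data FROM log").fetchone()[0])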
Code Example #21
 def serialize_training_result(self, training_result: np.ndarray) -> bytes:
     return training_result.tolist()
Code Example #22
def online_qp(velqp: VelQP,
              v_ini: float,
              kappa: np.ndarray,
              delta_s: np.ndarray,
              P_max: np.array = None,
              ax_max: np.array = None,
              ay_max: np.array = None,
              x0_v: np.ndarray = None,
              v_max: np.ndarray = None,
              v_end: float = None,
              F_ini: float = None,
              s_glob: float = None,
              v_max_cstr: np.ndarray = None) -> tuple:
    """
    Python version: 3.5
    Created by: Thomas Herrmann ([email protected])
    Created on: 01.11.2019

    Documentation: Creates an SQP that optimizes a velocity profile for a given path.

    Inputs:
    velqp: QP solver object used within the SQP
    v_ini: initial velocity hard constraint [m/s]
    kappa: kappa profile of given path [rad/m]
    delta_s: discretization step length [m]
    P_max: max. allowed power [kW]
    ax_max: max. allowed longitudinal acceleration [m/s^2]
    ay_max: max. allowed lateral acceleration [m/s^2]
    x0_v: initial guess of optimal velocity [m/s]
    v_max: target maximum velocity (soft constraint, enters the objective function) [m/s]
    v_end: constrained end velocity in optimization horizon [m/s]
    F_ini: initial force constraint [kN]
    s_glob: global s coordinate of current vehicle position [m]
    v_max_cstr: maximum velocity enforced as a hard constraint [m/s]

    Outputs:
    v_op: optimized velocity using OSQP as QP solver [m/s]
    s_t_op: optimized slack values [-]
    qp_status: status of last QP within SQP [-]
    """

    # --- Steplength reduction parameter for Armijo rule
    beta = 2 / 4

    # --- Initialization of logging variables
    x0_v_log = None
    x0_s_t_log = None
    kappa_log = None
    delta_s_log = None
    ax_max_log = None
    ay_max_log = None
    v_ini_log = None
    v_max_log = None
    v_end_log = None
    F_ini_log = None

    ####################################################################################################################
    # --- Preparing input parameters for SQP
    ####################################################################################################################

    # --- Emergency SQP
    # upper bound for last velocity entry depending on current velocity
    if velqp.sid == 'EmergSQP':
        # --- Upper bound for last velocity point
        v_end = 0.4

        v_max = np.array(
            [v_ini - (x + 1) * 4 for x in range(velqp.sqp_stgs['m'])])
        # set all values below threshold to threshold
        v_max[v_max < 0.0] = 0.0

        # --- Assume linear velocity decrease to v_end from current velocity
        x0_v = np.array([
            v_ini + x * (v_max[-1] - v_ini) / velqp.sqp_stgs['m']
            for x in range(velqp.sqp_stgs['m'])
        ])

        # Initialize slack variables with zero values
        x0_s_t = np.array([0] * velqp.n)

        # Overwrite None
        F_ini = 0
        s_glob = 0

    # --- /Emergency SQP

    # --- Performance SQP
    else:
        x0_s_t = np.array([0] * velqp.n)
    # --- /Performance SQP

    # --- Make sure to always have a numeric velocity > 0.1
    if v_ini < velqp.sym_sc_['vmin_mps_']:
        v_ini = velqp.sym_sc_['vmin_mps_']
    x0_v[0] = v_ini

    # --- Initialization of optimization variables
    # velocity [m/s]
    v_op = np.zeros(velqp.sqp_stgs['m'], )
    # slack variable on tires
    s_t_op = np.zeros(velqp.n, )

    qp_iter = 0
    qp_status = 0

    # SQP mean-error
    err = np.inf
    # SQP infinity-error
    err_inf = np.inf

    # SQP-counter
    n_sqp = 0
    # SQP-timer
    dt = 0

    # time limit of SQP loop [s]
    dt_lim = velqp.sqp_stgs['t_sqp_max']

    # --- Save inputs to SQP here for logging purpose
    x0_v_log = copy.deepcopy(x0_v.tolist())
    x0_s_t_log = copy.deepcopy(x0_s_t.tolist())
    kappa_log = kappa.tolist()
    delta_s_log = delta_s.tolist()
    v_ini_log = v_ini
    try:
        v_max_log = copy.deepcopy(v_max.tolist())
    except AttributeError:
        v_max_log = copy.deepcopy(v_max)

    v_end_log = v_end
    F_ini_log = F_ini

    if isinstance(ax_max, np.ndarray) and isinstance(ay_max, np.ndarray):
        ax_max_log = ax_max.tolist()
        ay_max_log = ay_max.tolist()
    else:
        ax_max_log = velqp.sym_sc_['axmax_mps2_']
        ay_max_log = velqp.sym_sc_['aymax_mps2_']

    if isinstance(P_max, np.ndarray):
        Pmax_log = P_max.tolist()
    else:
        Pmax_log = velqp.sym_sc_['Pmax_kW_']

    # --- Start SQP-loop
    t_start = time.time()
    while (err > velqp.err or err_inf > velqp.err_inf
           ) and n_sqp < velqp.sqp_stgs['n_sqp_max'] and dt < dt_lim:

        # --- Update parameters of QP
        if len(x0_v) != int(velqp.sqp_stgs['m']):
            print("Error in x0-length in ", velqp.sid)
            print(x0_v)
        if len(v_max) != int(velqp.sqp_stgs['m']):
            print("Error in v_max-length in ", velqp.sid)
            print(v_max)

        # --- Update QP matrices
        velqp.osqp_update_online(x0_v=x0_v,
                                 x0_s_t=x0_s_t,
                                 v_ini=v_ini,
                                 v_max=v_max,
                                 v_end=v_end,
                                 F_ini=F_ini,
                                 kappa=kappa,
                                 delta_s=delta_s,
                                 P_max=P_max,
                                 ax_max=ax_max,
                                 ay_max=ay_max,
                                 vmax_cstr=v_max_cstr)

        # --- Solve the QP
        sol, qp_iter, qp_status = velqp.osqp_solve()

        # --- Check primal infeasibility of problem and return v = 0
        if qp_status == -3:
            break

        # --- Store solution from previous SQP-iteration
        o_old = np.append(v_op, x0_s_t)
        # --- Store errors from previous SQP-iteration
        err_old = err
        err_inf_old = err_inf
        try:
            ############################################################################################################
            # --- Armijo: decrease steplength alpha if SQP-error increases between iterations
            ############################################################################################################
            k = 0  # counter for Armijo-loop
            while True:
                # --- Choose a steplength
                alpha = beta**k  # steplength e {1, beta, beta^2, ...}

                # --- Restructure solution from QP-solution
                # Add leading "0" as ini-velocity must be kept constant as given
                v_op = alpha * np.insert(sol[0:velqp.m - 1], 0, 0) + x0_v

                s_t_op = alpha * sol[velqp.m - 1:] + x0_s_t

                # --- Calculate SQP iteration error
                o = np.append(v_op, s_t_op)
                err = np.sqrt(np.matmul(o - o_old, o - o_old)) / o.shape[0]
                err_inf = np.max(np.abs(o - o_old))

                # --- Break Armijo-loop in case a suitable steplength alpha was found
                if err < err_old and err_inf < err_inf_old:
                    break
                # --- Increase Armijo-loop's counter and restart loop
                else:
                    k += 1

                if velqp.sqp_stgs['b_print_sqp_alpha']:
                    print(velqp.sid + " | alpha: " + str(alpha))

            ############################################################################################################
            # --- Postprocessing Armijo-loop: Create new operating point for optimization variables
            ############################################################################################################
            # --- Create new operating-point for velocity variables
            x0_v = v_op
            # --- Create new operating-point for tire slack variables
            x0_s_t = s_t_op

        except TypeError:
            # --- Do different initialization till n_sqp_max is reached
            if velqp.sid == 'EmergSQP':
                x0_v = (v_ini - 0.05) * np.ones((velqp.m, ))
                v_op = np.zeros(velqp.sqp_stgs['m'], )
                x0_s_t = np.zeros(velqp.n, )
                s_t_op = np.zeros(velqp.n, )
                print(
                    "No solution for emerg. line found. Retrying with different initialization ..."
                )

                # Reset SQP-counter
                n_sqp = 0

        if not velqp.sqp_stgs['b_online_mode']:
            print('Optimized velocity profile: ', v_op[0:velqp.sqp_stgs['m']])
            if velqp.sqp_stgs['obj_func'] == 'slacks':
                print('Slacks on velocity: ',
                      v_op[velqp.sqp_stgs['m']:2 * velqp.sqp_stgs['m']])

        if velqp.sqp_stgs['b_print_sqp_err']:
            print(velqp.sid + " | SQP err: " + str(err))
            print(velqp.sid + " | SQP inf.-err: " + str(err_inf))

        ################################################################################################################
        # --- Check termination criteria for SQP-loop
        ################################################################################################################
        # increase SQP-iteration counter
        n_sqp += 1

        if n_sqp >= velqp.sqp_stgs['n_sqp_max']:
            print(velqp.sid + " reached max. SQP iteration-number!")

        # update timer
        dt = time.time() - t_start
        if dt >= dt_lim:
            print(velqp.sid + " took too long!")

    if velqp.sqp_stgs['b_print_SQP_runtime']:
        print(velqp.sid + " | SQP time [ms]: " + str(dt * 1000))

    # Only write to log-file after SQP-iterations
    if velqp.sid == 'PerfSQP' and velqp.logger_perf is not None:
        velqp.logger_perf.debug('%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s',
                                str(datetime.datetime.now().time()), s_glob,
                                json.dumps(x0_v_log), json.dumps(x0_s_t_log),
                                json.dumps(kappa_log),
                                json.dumps(delta_s_log), v_ini_log,
                                json.dumps(v_max_log), v_end_log, F_ini_log,
                                Pmax_log, json.dumps(qp_iter),
                                json.dumps(qp_status), json.dumps(dt * 1000))

        velqp.logger_perf.debug('%s', v_op.tolist())

        velqp.logger_perf.debug('%s', s_t_op.tolist())

        velqp.logger_perf.debug('%s;%s', ax_max_log, ay_max_log)

    elif velqp.sid == 'EmergSQP' and velqp.logger_emerg is not None:
        velqp.logger_emerg.debug('%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s',
                                 str(datetime.datetime.now().time()), s_glob,
                                 json.dumps(x0_v_log), json.dumps(x0_s_t_log),
                                 json.dumps(kappa_log),
                                 json.dumps(delta_s_log), v_ini_log,
                                 json.dumps(v_max_log), v_end_log, F_ini_log,
                                 Pmax_log, json.dumps(qp_iter),
                                 json.dumps(qp_status), json.dumps(dt * 1000))

        velqp.logger_emerg.debug('%s', v_op.tolist())

        velqp.logger_emerg.debug('%s', s_t_op.tolist())

        velqp.logger_emerg.debug('%s;%s', ax_max_log, ay_max_log)

    if velqp.sqp_stgs['b_trajectory_check']:
        ax_norm = np.abs(kappa[0:velqp.sqp_stgs['m'] - 1] * v_op[0:velqp.sqp_stgs['m'] - 1] ** 2) / \
            velqp.sym_sc_['axmax_mps2_']
        ay_norm = np.abs(
            (v_op[1:velqp.sqp_stgs['m']] ** 2 - v_op[0:velqp.sqp_stgs['m'] - 1] ** 2) / (2 * np.array(delta_s))
            + (velqp.sym_sc_['c_res_'] * v_op[0:velqp.sqp_stgs['m'] - 1] ** 2) / (1000 * velqp.sym_sc_['m_t_'])) / \
            velqp.sym_sc_['aymax_mps2_']

        perf_check = (ax_norm + ay_norm) > 1

        if perf_check[:-1].any():
            print(ax_norm + ay_norm)
            print('*** SQP: Trajectory not OK! ', velqp.sid, ' ***')

    if velqp.sqp_stgs['b_print_n_sqp']:
        print(velqp.sid + ' | nSQP ' + str(n_sqp))

    if velqp.sqp_stgs['b_print_s_v_val']:
        print(velqp.sid + ' | s_v_tires ' + str(x0_s_t))

    if velqp.sqp_stgs['b_print_J']:
        print("(v - v_max) ** 2", np.sum((v_op - v_max)**2))
        print("Tre. slacks", velqp.sym_sc_['s_tre_w_'] * np.sum(s_t_op**2))

    return v_op, s_t_op, qp_status
Code Example #23
 def save_outcome(self, base_directory: str, dataset: DataFrame,
                  outcome: ndarray):
     dataset['outcome'] = outcome.tolist()
     dataset.to_csv(os.path.join(base_directory, 'outcome.csv'),
                    index=False)
Code Example #24
 def _post_process(self, prediction: np.ndarray) -> HousePredictionResult:
     logger.debug("Post-processing prediction.")
     result = prediction.tolist()
     human_readable_unit = result[0] * self.RESULT_UNIT_FACTOR
     hpp = HousePredictionResult(median_house_value=human_readable_unit)
     return hpp
Code Example #25
 def _write_result(cls, result: np.ndarray, dest: str):
     with open(dest, 'w+') as file:
         file.write(json.dumps({'result': result.tolist()}))
     print(f'you can find result in file {dest}')
Code Example #26
 def update_board(self, board: np.ndarray):
     # Warning: will remove all the piece indices inside the board (set to 1)
     self.board = board.tolist()
Code Example #27
    def test_correlation_video_capture(
            cv_video_capture: CVVideoCapture,
            correlation_limit,
            frame_acceptance_np: np.ndarray,
            frame_start=0,
            frame_end=None,
            batch_size=200,
            gray_scale_conversion_code=cv2.COLOR_BGR2GRAY,
            progress_tracker: CVProgressTracker = None):
        frame_count = int(cv_video_capture.get_frame_count())
        if frame_end:
            cv_video_capture.set_position_frame(frame_start)
            frame_count = min(frame_end - frame_start, frame_count)
            frame_count = max(frame_count, 0)

        if progress_tracker:
            progress_tracker.running = True

        frame_acceptance_ctype = multiprocessing.Array(
            'b', frame_acceptance_np.tolist())
        progress_value = multiprocessing.Value('d')
        lock_video_capture = multiprocessing.RLock()

        skip_window_both_end = int(cv_video_capture.get_frame_rate())
        worker_count = multiprocessing.cpu_count()
        task_per_worker = int(frame_count / worker_count)
        args_list = [(task_per_worker * i, task_per_worker * (i + 1),
                      frame_start, frame_count, batch_size, correlation_limit,
                      frame_acceptance_ctype, cv_video_capture.file_handle,
                      progress_value, lock_video_capture,
                      gray_scale_conversion_code, skip_window_both_end)
                     for i in range(0, worker_count - 1)]
        args_list.append(
            (task_per_worker * (worker_count - 1), frame_count, frame_start,
             frame_count, batch_size, correlation_limit,
             frame_acceptance_ctype, cv_video_capture.file_handle,
             progress_value, lock_video_capture, gray_scale_conversion_code,
             skip_window_both_end))

        processes = [
            Process(target=_test_correlation_capture_worker, args=arg_tuple)
            for arg_tuple in args_list
        ]

        def update_progress_tracker_first_pass():
            progress_tracker.progress = progress_value.value / worker_count * 0.7

        progress_timer = RepeatingTimer(0.1,
                                        update_progress_tracker_first_pass)

        progress_value.value = 0
        if progress_tracker:
            progress_timer.start()

        if progress_tracker:
            progress_tracker.running = True
        for p in processes:
            p.start()
        for p in processes:
            p.join()

        print('[Correlation] final pass')

        final_pass_ranges = generate_multiprocessing_final_pass_ranges \
            (frame_acceptance_ctype, frame_count, task_per_worker, worker_count, skip_window_both_end)

        final_pass_arg_list = [(
            range_i[0],
            range_i[1],
            frame_start,
            frame_count,
            batch_size,
            correlation_limit,
            frame_acceptance_ctype,
            cv_video_capture.file_handle,
            progress_value,
            lock_video_capture,
            gray_scale_conversion_code,
        ) for range_i in final_pass_ranges]

        final_pass_processes = [
            Process(target=_test_correlation_capture_worker, args=arg_tuple)
            for arg_tuple in final_pass_arg_list
        ]

        def update_progress_tracker_final_pass():
            progress_tracker.progress = 0.7 + progress_value.value / worker_count * 0.3

        progress_value.value = 0
        if progress_tracker:
            progress_timer.function = update_progress_tracker_final_pass

        for p in final_pass_processes:
            p.start()
        for p in final_pass_processes:
            p.join()

        if progress_tracker:
            progress_timer.cancel()
            progress_tracker.complete()

        return np.array(frame_acceptance_ctype, dtype=np.bool_).copy()
Code Example #28
def jaccard_similarity(vec1: np.ndarray, vec2: np.ndarray) -> float:
    return len((set(vec1.tolist()).intersection(set(vec2.tolist())))) / len(
        (set(vec1.tolist()).union(set(vec2.tolist()))))
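A short usage sketch (not from the original source). The similarity is computed over sets of distinct values, so duplicates are ignored, and two empty inputs would raise ZeroDivisionError:

import numpy as np
a = np.array([1, 2, 3, 4])
b = np.array([3, 4, 5, 6])
print(jaccard_similarity(a, b))   # |{3, 4}| / |{1, 2, 3, 4, 5, 6}| = 2 / 6 ≈ 0.333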
Code Example #29
File: historydb.py Project: kadircs/GPTune
    def update_model_LCM(self,\
            objective : int,
            problem : Problem,\
            input_given : np.ndarray,\
            bestxopt : np.ndarray,\
            neg_log_marginal_likelihood : float,\
            gradients : np.ndarray,\
            iteration : int):

        if (self.tuning_problem_name is not None):
            json_data_path = self.history_db_path + "/" + self.tuning_problem_name + ".json"

            new_surrogate_models = []

            now = time.localtime()

            #from scipy.stats.mstats import gmean
            #from scipy.stats.mstats import hmean
            model_stats = {}
            model_stats["log_likelihood"] = -1.0 * neg_log_marginal_likelihood
            model_stats["neg_log_likelihood"] = neg_log_marginal_likelihood
            model_stats["gradients"] = gradients.tolist()
            #model_stats["gradients_sum_abs"] = np.sum(np.absolute(gradients))
            #model_stats["gradients_average_abs"] = np.average(np.absolute(gradients))
            #model_stats["gradients_hmean_abs"] = hmean(np.absolute(gradients))
            #model_stats["gradients_gmean_abs"] = gmean(np.absolute(gradients))
            model_stats["iteration"] = iteration

            gradients_list = gradients.tolist()

            problem_space = {}
            problem_space["input_space"] = self.problem_space_to_dict(
                problem.IS)
            problem_space["parameter_space"] = self.problem_space_to_dict(
                problem.PS)
            problem_space["output_space"] = self.problem_space_to_dict(
                problem.OS)

            task_parameter_orig = problem.IS.inverse_transform(
                np.array(input_given, ndmin=2))
            task_parameter_orig_list = np.array(task_parameter_orig).tolist()

            new_surrogate_models.append({
                "hyperparameters": bestxopt.tolist(),
                "model_stats": model_stats,
                "func_eval": self.uids,
                "task_parameters": task_parameter_orig_list,
                "problem_space": problem_space,
                "modeler": "Model_LCM",
                "objective_id": objective,
                "time": {
                    "tm_year": now.tm_year,
                    "tm_mon": now.tm_mon,
                    "tm_mday": now.tm_mday,
                    "tm_hour": now.tm_hour,
                    "tm_min": now.tm_min,
                    "tm_sec": now.tm_sec,
                    "tm_wday": now.tm_wday,
                    "tm_yday": now.tm_yday,
                    "tm_isdst": now.tm_isdst
                },
                "uid": str(uuid.uuid1())
                # objective id is to distinguish between different models for multi-objective optimization;
                # we might need a nicer way to manage different models
            })

            if self.file_synchronization_method == 'filelock':
                with FileLock(json_data_path + ".lock"):
                    with open(json_data_path, "r") as f_in:
                        json_data = json.load(f_in)
                        json_data["model_data"] += new_surrogate_models
                    with open(json_data_path, "w") as f_out:
                        json.dump(json_data, f_out, indent=2)
            elif self.file_synchronization_method == 'rsync':
                while True:
                    temp_path = json_data_path + "." + self.process_uid + ".temp"
                    os.system("rsync -a " + json_data_path + " " + temp_path)
                    with open(temp_path, "r") as f_in:
                        json_data = json.load(f_in)
                        json_data["model_data"] += new_surrogate_models
                    with open(temp_path, "w") as f_out:
                        json.dump(json_data, f_out, indent=2)
                    os.system("rsync -u " + temp_path + " " + json_data_path)
                    os.system("rm " + temp_path)
                    with open(json_data_path, "r") as f_in:
                        json_data = json.load(f_in)
                        existing_uids = [
                            item["uid"] for item in json_data["model_data"]
                        ]
                        new_uids = [
                            item["uid"] for item in new_surrogate_models
                        ]
                        retry = False
                        for uid in new_uids:
                            if uid not in existing_uids:
                                retry = True
                                break
                        if retry == False:
                            break
            else:
                with open(json_data_path, "r") as f_in:
                    json_data = json.load(f_in)
                    json_data["model_data"] += new_surrogate_models
                with open(json_data_path, "w") as f_out:
                    json.dump(json_data, f_out, indent=2)

        return
Code Example #30
File: codecs_test.py Project: Yamp/home_lab
def serialize_rjson(arr: np.ndarray):
    return rapidjson.dumps(arr.tolist()).encode(encoding='utf8')
Code Example #31
def store_matrix_cache(matrix: np.ndarray):
    if not enable_cache:
        return
    redis_template.db(0).set(MATRIX_KEY, json.dumps(matrix.tolist()))
    logger.info("stored the matrix in redis as cache")
Code Example #32
File: prerolled.py Project: Kunstmord/krakenous
def numpy_array_serializer(array: np.ndarray) -> str:
    return dumps(array.tolist())
Code Example #33
 def _transform_matrix_to_ir(matrix: np.ndarray):
     return [[[element.real, element.imag] for element in row] for row in matrix.tolist()]
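A small usage sketch (not from the original snippet) showing the [real, imag] pair layout the helper produces for a 2x2 complex matrix; the helper appears to be a staticmethod and is called here as a plain function for brevity:

import numpy as np
m = np.array([[1 + 2j, 0], [0, 1 - 2j]])
print(_transform_matrix_to_ir(m))
# [[[1.0, 2.0], [0.0, 0.0]], [[0.0, 0.0], [1.0, -2.0]]]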