Example #1
    def test_randomized(self):
        # Solve random linear systems and compare against numpy's solver.
        for trial in range(4):
            print(trial)
            size = 257
            d1 = np.empty([size, size])
            d2 = np.empty(size)

            for i, j in np.ndindex((size, size)):
                d1[i, j] = random.randrange(1, 100)

            for i in range(size):
                d2[i] = random.randrange(1, 100)

            start = dt()
            np_solution = np.linalg.solve(d1, d2)
            print("Numpy solution lasted {}.".format(dt() - start))
            print(np_solution[:100])

            start = dt()
            my_solution = solve(d1, d2)
            print("My solution lasted {}.".format(dt() - start))
            print(my_solution)

            for mine, ref in zip(my_solution, np_solution):
                if not np.isclose(mine, ref):
                    print(mine, ref)
                self.assertAlmostEqual(mine, ref)
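The solve() under test is defined elsewhere in the project; purely as an illustration, a compatible solver (Gaussian elimination with partial pivoting) might look like this sketch:

import numpy as np

def solve(A, b):
    # Illustrative stand-in for the solve() tested above, not the real code.
    A = A.astype(float).copy()
    b = b.astype(float).copy()
    n = len(b)
    for k in range(n):
        # partial pivoting: move the largest remaining pivot into row k
        p = k + np.argmax(np.abs(A[k:, k]))
        A[[k, p]], b[[k, p]] = A[[p, k]], b[[p, k]]
        for i in range(k + 1, n):
            f = A[i, k] / A[k, k]
            A[i, k:] -= f * A[k, k:]
            b[i] -= f * b[k]
    x = np.empty(n)
    for k in range(n - 1, -1, -1):  # back substitution
        x[k] = (b[k] - A[k, k + 1:] @ x[k + 1:]) / A[k, k]
    return x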
Example #2
    def solve(self):
        self.VC = []
        self.sc = []
        self.MUC = []

        self.VSF = []
        self.ssf = []
        self.MUSF = []

        self.VSM = []
        self.ssm = []
        self.MUSM = []

        for t in range(self.T, 0, -1):
            # couples:

            print('solving for t = {}'.format(t))
            try:
                Vnext = self.VC[0]
                MUnext = self.MUC[0]
            except IndexError:  # last period: no continuation values yet
                Vnext = 3 * (np.zeros(self.v_couple_shape[-1]), )
                MUnext = np.zeros(self.v_couple_shape[-1])

            Vthis, MUthis, s = iteration_couples(self, t, Vnext, MUnext)
            self.sc = [s] + self.sc
            self.VC = [Vthis] + self.VC
            self.MUC = [MUthis] + self.MUC
            print('couples done, time {}'.format(dt() - self.t0))

            # singles:

            try:
                Vnext = self.VSF[0]
                MUnext = self.MUSF[0]
            except IndexError:
                Vnext = np.zeros(self.v_sf_shape[-1])
                MUnext = np.zeros(self.v_sf_shape[-1])

            Vthis, MUthis, s = iteration_singles(self, t, Vnext, MUnext, True)
            self.ssf = [s] + self.ssf
            self.VSF = [Vthis] + self.VSF
            self.MUSF = [MUthis] + self.MUSF
            print('single female done, time {}'.format(dt() - self.t0))

            try:
                Vnext = self.VSM[0]
                MUnext = self.MUSM[0]
            except IndexError:
                Vnext = np.zeros(self.v_sm_shape[-1])
                MUnext = np.zeros(self.v_sm_shape[-1])

            Vthis, MUthis, s = iteration_singles(self, t, Vnext, MUnext, False)
            self.ssm = [s] + self.ssm
            self.VSM = [Vthis] + self.VSM
            self.MUSM = [MUthis] + self.MUSM
            print('single male done, time {}'.format(dt() - self.t0))
Example #3
def opencv_call(x):
    start_time = dt()
    H = c_long(x.shape[0])
    W = c_long(x.shape[1])
    P = x.ctypes.data_as(c_void_p)  # The C pointer
    double_me_lib.double_me(P, W, H)
    rt = (dt() - start_time) * 1000
    # print("OpenCV C code took %d ms for %d floats" % (rt, x.size))
    return rt
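opencv_call() assumes a shared library has already been loaded; a minimal ctypes setup consistent with the call above (the library file name is an assumption, the real build is not shown) could be:

from ctypes import CDLL, c_long, c_void_p
import numpy as np

# Assumed library file name; the C header (shown in a later example) is:
# void double_me(void *buffer, const int W, const int H);
double_me_lib = CDLL("./double_me.so")

x = np.ascontiguousarray(np.random.randn(512, 512).astype(np.float32))
print("took %.3f ms" % opencv_call(x))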
Example #4
    def __init__(self, **kwargs):
        self.setup = Setup(**kwargs)
        self.t0 = dt()
        s = self.setup
        self.T = self.setup.T
        self.v_couple_shape = [(s.na_c, s.nexo_t[t], s.ntheta_coarse)
                               for t in range(self.T)]
        self.v_sf_shape = [(s.na_s, s.n_zf) for t in range(self.T)]
        self.v_sm_shape = [(s.na_s, s.n_zm) for t in range(self.T)]
        print('setup created, time {}'.format(dt() - self.t0))
        self.solve()
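Throughout these examples, dt is timeit's default timer; the import appears explicitly in a later snippet:

from timeit import default_timer as dt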
Example #5
def evaluate(
    model, img_emb, txt_emb, lengths,
    device, shared_size=128, return_sims=False
):
    model.eval()
    _metrics_ = ('r1', 'r5', 'r10', 'medr', 'meanr')

    begin_pred = dt()

    img_emb = torch.tensor(img_emb).to(device)
    txt_emb = torch.tensor(txt_emb).to(device)

    end_pred = dt()
    sims = model.compute_pairwise_similarity(
        model.similarity, img_emb, txt_emb, lengths,
        shared_size=shared_size
    )
    print(sims.min(), sims.max(), sims.mean())
    # sims = model.get_sim_matrix(
    #     embed_a=img_emb, embed_b=txt_emb,
    #     lens=lengths,
    # )
    # subsample one column per row so the similarity matrix is square
    div = sims.shape[1] / sims.shape[0]
    samp_sim = sims[:, np.arange(0, sims.shape[1], div).astype(int)]

    val_loss = model.multimodal_criterion(samp_sim)
    sims = layers.tensor_to_numpy(sims)

    end_sim = dt()

    i2t_metrics = i2t(sims)
    t2i_metrics = t2i(sims)

    rsum = np.sum(i2t_metrics[:3]) + np.sum(t2i_metrics[:3])

    i2t_metrics = {f'i2t_{k}': v for k, v in zip(_metrics_, i2t_metrics)}
    t2i_metrics = {f't2i_{k}': v for k, v in zip(_metrics_, t2i_metrics)}

    metrics = {
        'pred_time': end_pred-begin_pred,
        'sim_time': end_sim-end_pred,
        'val_loss': val_loss,
    }
    metrics.update(i2t_metrics)
    metrics.update(t2i_metrics)
    metrics['rsum'] = rsum

    if return_sims:
        return metrics, sims

    return metrics
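The i2t/t2i helpers are not shown. As a rough sketch only, an image-to-text recall metric matching the names used above (r1, r5, r10, medr, meanr) often looks like this, assuming item i in one modality matches item i in the other (real implementations, e.g. with multiple captions per image, differ):

import numpy as np

def i2t(sims):
    # sims[i, j]: similarity of image i to text j; the diagonal pairing
    # is a simplifying assumption of this sketch.
    ranks = np.empty(sims.shape[0])
    for i in range(sims.shape[0]):
        order = np.argsort(sims[i])[::-1]      # best match first
        ranks[i] = np.where(order == i)[0][0]  # rank of the true match
    r1, r5, r10 = (100.0 * np.mean(ranks < k) for k in (1, 5, 10))
    return r1, r5, r10, np.median(ranks) + 1, np.mean(ranks) + 1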
Example #6
def updateProgBar(curIter, totalIter, t0, barLength=20, decimals=0):
    """
    Update progress bar. Place this function anywhere in a loop where you want
    to keep track of the loop's progress.

    Parameters
    ----------
    curIter : int
        The current iteration.
    totalIter : int
        The total number of iterations.
    t0 : numeric
        The start time of the operation (in seconds).
    barLength : int, optional
        The length of the progress bar. The default is 20.
    decimals : int, optional
        The number of decimal places to use for tracking milliseconds.
        The default is 0.

    Returns
    -------
    None.

    """
    status = "Working..."
    progress = float(curIter) / float(totalIter)
    if progress >= 1:
        progress = 1.0
        status = "Finished!..."
        status = "Finished!..."
    block = int(round(barLength * progress))
    text = "\rPercent: [{0}] {1:.2f}% iter: {2}/{3} {4} Elapsed: {5}, Estimated: {6}".format(
        "#" * block + "-" * (barLength - block), round(progress * 100.0, 2),
        curIter, totalIter, status,
        pretty_print_time(t0, dt(), decimals=decimals),
        pretty_print_time((dt() - t0) / curIter * (totalIter - curIter),
                          decimals=decimals))
    if progress >= 1:
        sys.stdout.write(text + "\r\n")
        sys.stdout.flush()
    else:
        sys.stdout.write(text)
        sys.stdout.flush()
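A minimal usage sketch for updateProgBar (assuming it and its pretty_print_time dependency are importable; the work inside the loop is a stand-in):

import time
from timeit import default_timer as dt

total = 100
t0 = dt()
for i in range(total):
    time.sleep(0.01)  # stand-in for real work
    updateProgBar(i + 1, total, t0)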
Example #7
def evaluate(
    model, img_emb, txt_emb, lengths,
    device, shared_size=128, return_sims=False
):
    model.eval()
    _metrics_ = ('r1', 'r5', 'r10', 'medr', 'meanr')

    begin_pred = dt()

    img_emb = torch.FloatTensor(img_emb).to(device)
    txt_emb = torch.FloatTensor(txt_emb).to(device)

    end_pred = dt()
    sims = model.get_sim_matrix_shared(
        embed_a=img_emb, embed_b=txt_emb,
        lens=lengths, shared_size=shared_size
    )
    sims = layers.tensor_to_numpy(sims)
    end_sim = dt()

    i2t_metrics = i2t(sims)
    t2i_metrics = t2i(sims)
    rsum = np.sum(i2t_metrics[:3]) + np.sum(t2i_metrics[:3])

    i2t_metrics = {f'i2t_{k}': v for k, v in zip(_metrics_, i2t_metrics)}
    t2i_metrics = {f't2i_{k}': v for k, v in zip(_metrics_, t2i_metrics)}

    metrics = {
        'pred_time': end_pred-begin_pred,
        'sim_time': end_sim-end_pred,
    }
    metrics.update(i2t_metrics)
    metrics.update(t2i_metrics)
    metrics['rsum'] = rsum

    if return_sims:
        return metrics, sims

    return metrics
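Both evaluate() variants convert tensors via layers.tensor_to_numpy; a typical minimal equivalent (an assumption, the library's version may differ):

def tensor_to_numpy(t):
    # detach from the autograd graph, move to CPU, convert to numpy
    return t.detach().cpu().numpy()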
Example #8
    def _forward(self, batch, lang_iters, epoch):
        self.model.train()
        self.optimizer.zero_grad()
        begin_forward = dt()

        multimodal_loss = self._forward_multimodal_loss(batch)
        total_lang_loss, loss_info = self._get_multilanguage_total_loss(
            lang_iters)
        total_loss = multimodal_loss + total_lang_loss
        total_loss.backward()

        norm = 0.
        if self.clip_grad > 0:
            norm = clip_grad_norm_(self.model.parameters(), self.clip_grad)

        self.optimizer.step()
        if self.lr_scheduler is not None:
            self.lr_scheduler.step()

        end_backward = dt()
        batch_time = end_backward - begin_forward
        return self._update_train_info(batch_time, multimodal_loss, total_loss,
                                       epoch, norm, loss_info)
Example #9
    def ForwardSelectionAnalysis(self,
                                 cv_iterations=5,
                                 random_state=123,
                                 default_penalty=0.01,
                                 for_stride=2,
                                 reduce_features_by=30,
                                 red_metric='bic',
                                 verbose=True,
                                 t0=None):
        """
        Run very basic forward selection procedure on the model.

        Parameters
        ----------
        cv_iterations : int, optional
            The number of cross-validation steps. The default is 5.
        random_state : int, optional
            The random seed for the process. The default is 123.
        default_penalty : float, optional
            The penalty term to be applied to models where the penalty is
            otherwise unspecified. The default is 0.01.
        for_stride : int
            Window size of the coefficients to test in the forward selection
            procedure. For example, stride=1 is 1,2,3,4,..., stride=2 is
            1,3,5,7,... The default is 2.
        reduce_features_by : int, optional
            Number of features to evaluate in the final model.
            The default is 30.
        red_metric : str, optional
            Metric to use for finding optimal penalty value.
            The default is 'bic'.
        verbose : bool, optional
            Print results of the process. The default is True.
        t0 : float, optional
            Initial start time for the process. The default is None.

        Returns
        -------
        None.

        """
        # TODO: make final feature selection automatic
        self._fsa_analysis_arglist = {
            'cv_iterations': cv_iterations,
            'random_state': random_state,
            'default_penalty': default_penalty,
            'for_stride': for_stride,
            'reduce_features_by': reduce_features_by,
            'red_metric': red_metric,
            'verbose': verbose,
            't0': t0
        }

        if t0 is None:
            t0 = dt()

        self.plotAICs()
        aic_type = {'aic': 0, 'aicc': 1, 'bic': 2, 'ebic': 3, 'misclass': 4}
        self.opt_c = self.reduction[np.where(
            self.aics[:, aic_type[red_metric]] == np.min(
                self.aics[:, aic_type[red_metric]]))][0]
        self.optimal_num_params = np.array(self.nonzero_coefs)[np.where(
            self.aics[:, aic_type[red_metric]] == np.min(
                self.aics[:, aic_type[red_metric]]))][0]

        if verbose:
            print_time("\nCreating Model with Optimal Penalty Value...",
                       t0,
                       te=dt())

        self.build_model('lasso_reduction_mod', cv_iterations, self.opt_c,
                         random_state)

        coef = self.steps['lasso_reduction_mod']['model'].coef_.reshape(-1, )
        coef_ord = pd.DataFrame(
            np.abs(coef),
            index=self.train.loc[:, self.cur_pred_list].columns,
            columns=["Importance"])
        coef_ord = coef_ord.sort_values('Importance', ascending=False)

        keep_cols = list(coef_ord[coef_ord["Importance"] > 0].index)
        keep_cols.append(self.target_var)
        self.train = self.train.loc[:, keep_cols]
        self.cur_pred_list = self.train.columns[np.where(
            self.target_var != self.train.columns)[0]]
        self.steps['lasso_reduction_mod']['keep_cols'] = keep_cols
        keep_cols = []
        coef_ord = []
        coef = []

        if verbose:
            print_time(
                "\nCreating Model with Optimal Penalty Value After Removing Zeroed Parameters...\n",
                t0,
                te=dt())

        self.build_model('lasso_reduction_mod_reduced', cv_iterations, 1,
                         random_state)

        coef_red = self.steps['lasso_reduction_mod_reduced'][
            'model'].coef_.reshape(-1, 1)
        coef_ord_red = pd.DataFrame(
            np.concatenate(
                (np.abs(coef_red), coef_red, 100.0 * (np.exp(coef_red) - 1)),
                axis=1),
            index=self.train.loc[:, self.cur_pred_list].columns,
            columns=["Importance", "Coefficiants", "% increase in Prob"])
        coef_ord_red = coef_ord_red.sort_values('Importance', ascending=False)

        self.steps['lasso_reduction_mod_reduced'][
            'coef_ord_red'] = coef_ord_red

        self.forwardElimMetricCalc(coef_ord_red, for_stride, random_state)
        self.plotForwardMetrics()

        cols_to_include = list(coef_ord_red.index[:reduce_features_by])
        self.cur_pred_list = cols_to_include

        if verbose:
            print_time("\nCreating Final Model...", t0, te=dt())

        self.build_model('final_mod', cv_iterations, 1, random_state)

        coef_red_final = self.steps['final_mod']['model'].coef_.reshape(-1, 1)
        coef_ord_red_final = pd.DataFrame(
            np.concatenate((np.abs(coef_red_final), coef_red_final, 100.0 *
                            (np.exp(coef_red_final) - 1)),
                           axis=1),
            index=self.train.loc[:, cols_to_include].columns,
            columns=["Importance", "Coefficiants", "% increase in Prob"])
        coef_ord_red_final = coef_ord_red_final.sort_values('Importance',
                                                            ascending=False)
        self.coef_ord_red_final = coef_ord_red_final

        self.Xtrain = self.train.loc[:, cols_to_include]
        self.Ytrain = self.train.loc[:, self.target_var]
        self.Ypred_train = self.steps['final_mod']['model'].predict(
            self.Xtrain)
        self.Yprob_train = self.steps['final_mod']['model'].predict_proba(
            self.Xtrain)

        self.Xtest = self.holdout.loc[:, cols_to_include]
        self.Ytest = self.holdout.loc[:, self.target_var]
        self.Ypred_test = self.steps['final_mod']['model'].predict(self.Xtest)
        self.Yprob_test = self.steps['final_mod']['model'].predict_proba(
            self.Xtest)[:, 1]

        if verbose:
            print_time("\nFinished...", t0, te=dt())
Example #10
    def BaselineDropProcedures(self,
                               freshStart=True,
                               downsample=False,
                               dropCols=None,
                               dropCor=True,
                               corr_cutoff=0.9,
                               dropVar=True,
                               dropVarTol=0.001,
                               cv_iterations=5,
                               random_state=123,
                               default_penalty=0.01,
                               verbose=True,
                               t0=None):
        """
        Create a baseline model and begin dropping features from the model
        based on the given criteria.

        Parameters
        ----------
        freshStart : bool, optional
            Whether to use a previous result, or start from scratch.
            The default is True.
        downsample : bool, optional
            Whether or not to downsample the majority class.
            The default is False.
        dropCols : list, optional
            List of columns to initially drop. The default is None.
        dropCor : bool, optional
            Drop variables with high correlations. The default is True.
        corr_cutoff : float, optional
            Cutoff for removing high correlation variables. Anything below the
            cutoff is kept. The default is 0.9.
        dropVar : bool, optional
            Drop variables with very low variance.
            Does nothing if data is standardized. The default is True.
        dropVarTol : float, optional
            Cutoff value for low variance. The default is 0.001.
        cv_iterations : int, optional
            The number of cross validation steps. The default is 5.
        random_state : int, optional
            The random seed for the process. The default is 123.
        default_penalty : float, optional
            The penalty term to be applied to models where the penalty is
            otherwise unspecified. The default is 0.01.
        verbose : bool, optional
            Print steps as they complete. The default is True.
        t0 : float, optional
            The start time of the process. For internal use.
            The default is None.

        Returns
        -------
        None.

        """
        self._bdp_analysis_arglist = {
            'freshStart': freshStart,
            'downsample': downsample,
            'dropCols': dropCols,
            'dropCor': dropCor,
            'corr_cutoff': corr_cutoff,
            'dropVar': dropVar,
            'dropVarTol': dropVarTol,
            'cv_iterations': cv_iterations,
            'random_state': random_state,
            'default_penalty': default_penalty,
            'verbose': verbose,
            't0': t0
        }

        if t0 is None:
            t0 = dt()

        if freshStart:
            if not self.low_memory:
                self.train = self.train_raw
                self.holdout = self.holdout_raw
            else:
                print("Can't run fresh start in low memory mode.")

        if verbose:
            print_time("\nCreating Baseline Model...", t0, te=dt())

        self.build_model('baseline_mod', cv_iterations, default_penalty,
                         random_state)

        if verbose:
            print_time("\nExecuting Column Drop Procedures...", t0, te=dt())

        if dropCols is not None:
            if verbose:
                print_time("\nDropping Pre-Determined Columns...", t0, te=dt())

            cols = list(self.train.columns)
            # what I used to do...
            #idx = list(np.where([True if sum((True if reject in col else False for reject in dropCols))>0 else False for col in cols])[0])
            idx = [i for i, col in enumerate(cols) if col in dropCols]
            label_idx = list(self.train.columns[idx])
            self.train = self.train.drop(label_idx, axis=1)
            self.cur_pred_list = self.train.columns[np.where(
                self.target_var != self.train.columns)[0]]
            self.num_dropped_cols['dropCol_list'] = len(dropCols)
            if verbose:
                print_time("\nDropped " + str(len(dropCols)) +
                           " Predefined Columns...",
                           t0,
                           te=dt())

        if dropVar:
            if verbose:
                print_time("\nDropping Low Variance Columns...", t0, te=dt())

            varDropList = naiveVarDrop(
                self.train,
                list(self.train.columns[np.where(
                    self.target_var != self.train.columns)[0]]),
                tol=dropVarTol,
                asList=True)

            self.train = self.train.drop(varDropList, axis=1)
            self.varDropList = varDropList
            self.cur_pred_list = self.train.columns[np.where(
                self.target_var != self.train.columns)[0]]
            self.num_dropped_cols['varDropList'] = len(varDropList)
            if verbose:
                print_time("\nDropped " + str(len(varDropList)) +
                           " Low-Variance Columns...",
                           t0,
                           te=dt())
            varDropList = []

        if dropCor:
            if verbose:
                print_time("\nGenerating/Recovering Correlation Matrix...",
                           t0,
                           te=dt())

            if self.corr_mat is None or self.y_corr is None:
                self.GenCorrStats(is_raw=False)
            if self.highCorr is None:
                self.genHighCorrs(corr_cutoff)

            if verbose:
                print_time("\nDropping High Correlations...", t0, te=dt())

            HCdropList = dropHighCorrs(self.train,
                                       self.highCorr,
                                       asList=True,
                                       print_=False)
            self.train = self.train.drop(HCdropList, axis=1)
            self.HCdropList = HCdropList
            self.num_dropped_cols['HCdropList'] = len(HCdropList)
            if verbose:
                print_time("\nDropped " + str(len(HCdropList)) +
                           " Correlated Columns...",
                           t0,
                           te=dt())
            HCdropList = []
            self.cur_pred_list = self.train.columns[np.where(
                self.target_var != self.train.columns)[0]]
            self.plot_y_corr()

        if verbose:
            total_dropped = 0
            for key in self.num_dropped_cols.keys():
                total_dropped += self.num_dropped_cols[key]
            print_time("\nDropped " + str(total_dropped) + " Columns...",
                       t0,
                       te=dt())

        if verbose:
            print_time("\nCreating Post-Drop Baseline Model...", t0, te=dt())

        self.build_model('postdrop_baseline_mod', cv_iterations,
                         default_penalty, random_state)

        if verbose:
            print_time("\nFinished Baseline Drop...", t0, te=dt())
Example #11
    def forwardElimMetricCalc(self, coef_ord_red, stride, random_state):
        """
        Run a very basic forward elimination procedure, and calculate model 
        metrics.

        Parameters
        ----------
        coef_ord_red : list
            A list of model coefficients, ordered by importance.
        stride : int
            Window size of the coefficients to test in the forward selection
            procedure. For example, stride=1 is 1,2,3,4,..., stride=2 is
            1,3,5,7,...
        random_state : int
            Random seed for procedure.

        Returns
        -------
        None.

        """
        num_params = np.arange(1, self.train.shape[1] - 1, stride)
        # mean and std for each of: acc, f1, sens/recall, specificity, auc
        metrics = np.zeros(shape=(len(num_params), 10))

        t0 = dt()

        for i, parm in enumerate(num_params):
            cur_mod_reg = LogisticRegression(C=self.opt_c,
                                             max_iter=10000,
                                             penalty="l1",
                                             solver='liblinear',
                                             random_state=random_state)
            # Cross-Val fully reduced model
            cols_to_include = list(coef_ord_red.index[0:parm])
            cur_mod_cv_results = crossVal(self.train.loc[:, cols_to_include],
                                          self.train.loc[:, self.target_var],
                                          5,
                                          cur_mod_reg,
                                          print_=False)

            oos = cur_mod_cv_results['Out of Sample']
            for j, name in enumerate(
                    ("Accuracy", "F1", "Sens/Recall", "Specificity", "AUC")):
                metrics[i, 2 * j:2 * j + 2] = (np.mean(oos[name]),
                                               np.std(oos[name], ddof=1))

            updateProgBar(i + 1, len(num_params), t0)
        self.forwardElimMetrics = metrics
        self.num_params_forward = num_params
Example #12
         
        
if __name__ == '__main__':
    import numpy as np
    try:
        print(len(mdl.V))
    except (NameError, AttributeError):  # mdl not available: load from disk
        from dill import load
        mdl = load(open('mdl.pkl', 'rb+'))
        
        
    np.random.seed(12)

    q = dt()
    o = AgentsEst(mdl, T=30, verbose=False)
    ss_val = o.ss_val
    kf_val = o.kf_val

    print('total time is {:02.1f} sec'.format(dt() - q))
    
    
    '''
    pmeet_np = []
    ppreg_np = []
    
    
    for t in range(4):
    
        print('t = {}'.format(t))
Example #13
    def train_epoch(
        self, train_loader, lang_loaders,
        epoch, valid_loaders=None, log_interval=50,
        valid_interval=500, path=''
    ):
        # Avoid the mutable-default-argument pitfall
        valid_loaders = valid_loaders if valid_loaders is not None else []

        lang_iters = [
            DataIterator(
                loader=loader,
                device=self.device,
                non_stop=True
            )
            for loader in lang_loaders
        ]

        pbar = lambda x: x
        if self.master:
            pbar = lambda x: tqdm(
                x, total=len(x),
                desc='Steps ',
                leave=False,
            )

        for batch in pbar(train_loader):
            self.model.train()

            # Reset gradients for this step
            self.optimizer.zero_grad()

            begin_forward = dt()

            multimodal_loss = self.model.forward_multimodal_loss(batch)
            iteration = self.model.multimodal_criterion.iteration
            adjusted_iter = self.world_size * iteration

            # Cross-language update
            total_lang_loss = 0.
            loss_info = {}
            for lang_iter in lang_iters:

                lang_data = lang_iter.next()
                lang_loss = self.model.forward_multilanguage_loss(*lang_data)
                total_lang_loss += lang_loss
                loss_info[f'train_loss_{str(lang_iter)}'] = lang_loss

            total_loss = multimodal_loss + total_lang_loss
            total_loss.backward()

            norm = 0.
            if self.clip_grad > 0:
                norm = clip_grad_norm_(
                    self.model.parameters(),
                    self.clip_grad
                )

            self.optimizer.step()
            if self.lr_scheduler is not None:
                self.lr_scheduler.step()

            end_backward = dt()
            batch_time = end_backward-begin_forward

            train_info = Dict({
                'loss': multimodal_loss,
                'iteration': iteration,
                'total_loss': total_loss,
                'k': self.model.multimodal_criterion.k,
                'batch_time': batch_time,
                'countdown': self.count,
                'epoch': epoch,
                'norm': norm,
            })

            train_info.update(loss_info)

            for param_group in self.optimizer.param_groups:
                if 'name' in param_group:
                    train_info.update({f"lr_{param_group['name']}": param_group['lr']})
                else:
                    train_info.update({'lr_base': param_group['lr']})

            if self.master:
                logger.tb_log_dict(
                    tb_writer=self.tb_writer, data_dict=train_info,
                    iteration=iteration, prefix='train'
                )

            if iteration % valid_interval == 0:

                # Run evaluation
                metrics, metric_value = self.evaluate_loaders(valid_loaders)

                # Update early stop variables
                # and save checkpoint
                if metric_value < self.best_val:
                    self.count -= 1
                elif not self.save_all:
                    self.count = self.early_stop
                    self.best_val = metric_value

                if self.master:
                    self.save(
                        path=self.path,
                        is_best=(metric_value >= self.best_val),
                        args=self.args,
                        rsum=metric_value,
                    )

                    # Log updates
                    for metric, values in metrics.items():
                        self.tb_writer.add_scalar(metric, values, iteration)

                # Early stop
                if self.count == 0 and self.master:
                    self.sysoutlog('\n\nEarly stop\n\n')
                    return False

            if iteration % log_interval == 0 and self.master:
                helper.print_tensor_dict(train_info, print_fn=self.sysoutlog)

                if self.log_histograms:
                    logger.log_param_histograms(
                        self.model, self.tb_writer,
                        iteration=self.model.multimodal_criterion.iteration,
                    )
        return True
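train_epoch() relies on a DataIterator wrapper; a hypothetical sketch consistent with its use above (non_stop cycling and a next() method; the real class, including device placement and its __str__ naming, may differ):

class DataIterator:
    def __init__(self, loader, device, non_stop=False):
        self.loader = loader
        self.device = device
        self.non_stop = non_stop
        self._it = iter(loader)

    def next(self):
        try:
            return next(self._it)
        except StopIteration:
            if not self.non_stop:
                raise
            self._it = iter(self.loader)  # cycle back to the start
            return next(self._it)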
Example #14
    def fit(self, X, y):
        """Fit the model

        Parameters
        ----------
        X : numpy array
            The feature (or design) matrix.
        y : numpy array
            The response variable.

        Returns
        -------
        self
            Updates internal attributes, such as `coef_` and `intercept_`.

        """
        t0 = dt()
        X = check_array(X,
                        force_all_finite='allow-nan',
                        estimator=self,
                        copy=True)
        if not self.has_constant:
            X = add_constant(X, prepend=True)

        if self.start is not None:
            pass
        else:
            if self.verbose:
                print_time("Initializing Coefficients...",
                           t0,
                           dt(),
                           backsn=True)
            if self.initialize_weights == 'sklearn':
                C = self.C
                if C is None:
                    C = 1
                # TODO: update with Weibull Regression starting values
                ### If using sklearn version 0.23.2, can use this line instead
                #mod = glm_pois(alpha=1/C, fit_intercept=False, max_iter=1000).fit(X, y)
                #self.start = mod.coef_.reshape(-1, )
                ### else, use statsmodels
                mod = glm_pois(y, X, family=Poisson()).fit()
                self.start = mod.params.reshape(-1, )
                if self.extra_params > 0:
                    self.start = np.concatenate(
                        (self.start, np.repeat(1, self.extra_params)))
            elif self.initialize_weights == 'ones':
                self.start = np.ones(shape=X.shape[1] + self.extra_params)
            elif self.initialize_weights == 'random':
                self.start = np.random.normal(size=X.shape[1] +
                                              self.extra_params)
            else:
                self.start = np.zeros(shape=X.shape[1] + self.extra_params)

        if self.verbose:
            print_time("Beginning MCMC...", t0, dt(), backsn=True)

        postArgs = {
            'X': X,
            'Y': y,
            'l_scale': self.C if self.C is None else 2 * self.C
        }

        algo_res = applyMCMC(st=self.start,
                             ni=self.niter,
                             lp=self.lpost,
                             algo=self.algo,
                             postArgs=postArgs,
                             algoOpts=self.algo_options,
                             sd=self.retry_sd,
                             max_tries=self.retry_max_tries)

        self.mcmc_params = algo_res['parameters']
        self.prev_vals = algo_res['prev_vals']

        self.coef_, self.intercept_, self.extra_params_sum_ = self._create_coefs(
            self.mcmc_params, self.param_summary, self.extra_params)
        self.n_iter_ = self.niter

        # get model summaries
        weights = _check_sample_weight(None, X)
        y_pred = self.predict(X[:, 1:])
        y_mean = np.average(y, weights=weights)
        dev = np.sum(weights * (2 * (xlogy(y, y / y_pred) - y + y_pred)))
        dev_null = np.sum(weights * (2 * (xlogy(y, y / y_mean) - y + y_mean)))
        self.deviance_ = dev
        self.null_deviance_ = dev_null
        self.pearson_residuals_ = (y - y_pred) / np.sqrt(y_pred)
        self.pearson_chi2_ = np.sum(self.pearson_residuals_**2)
        self.model_d2_ = 1 - dev / dev_null
        self.df_model_ = X.shape[1] - 1
        self.df_residuals_ = X.shape[0] - X.shape[1]
        self.dispersion_scale_ = self.pearson_chi2_ / self.df_residuals_
        self.dispersion_scale_sqrt_ = np.sqrt(self.dispersion_scale_)

        return self
Example #15
from tsdst.metrics import r2
from tsdst.nn.model import NeuralNetwork


def cust_r2(y_true, y_pred):
    return np.mean([r2(y_true[:, i], y_pred[:, i]) for i in range(y_true.shape[1])])


X_og, Y_og = make_regression(n_samples=1000, n_features=50, 
                             n_informative=20, n_targets=1,
                             bias=0.0, effective_rank=None, tail_strength=0.5,
                             noise=0.0, shuffle=True, coef=False,
                             random_state=42)
 
t0 = dt()

X_train, X_test, Y_train, Y_test = train_test_split(X_og, Y_og, test_size=0.3,
                                                    random_state=42)

Y_train = Y_train.reshape(X_train.shape[0], -1)
Y_test = Y_test.reshape(X_test.shape[0], -1)

model = {
         'hidden0': {'depth': 10,
                     'activation': 'relu',
                     'derivative': 'relu_der',
                     'activation_args': {},
                     'initializer': 'he_uniform',
                     'dropout_keep_prob': 1,
                     'lambda': 0.01,
Example #16
    def fit(self, X, y):
        """Fit the model

        Parameters
        ----------
        X : numpy array
            The feature (or design) matrix.
        y : numpy array
            The response variable.

        Returns
        -------
        self
            Updates internal attributes, such as `coef_` and `intercept_`.

        """
        t0 = dt()
        X = check_array(X,
                        force_all_finite='allow-nan',
                        estimator=self,
                        copy=True)
        if not self.has_constant:
            X = add_constant(X, prepend=True)

        if self.start is not None:
            pass
        else:
            if self.verbose:
                print_time("Initializing Coefficients...",
                           t0,
                           dt(),
                           backsn=True)
            if self.initialize_weights == 'sklearn':
                C = self.C
                if C is None:
                    C = 1
                mod = glm_lr(C=C,
                             solver='liblinear',
                             penalty='l1',
                             fit_intercept=False).fit(X, y)
                self.start = mod.coef_.reshape(-1, )
                if self.extra_params > 0:
                    self.start = np.concatenate(
                        (self.start, np.repeat(1, self.extra_params)))
            elif self.initialize_weights == 'ones':
                self.start = np.ones(shape=X.shape[1] + self.extra_params)
            elif self.initialize_weights == 'random':
                self.start = np.random.normal(size=(X.shape[1] +
                                                    self.extra_params, ))
            else:
                self.start = np.zeros(shape=X.shape[1] + self.extra_params)

        if self.verbose:
            print_time("Beginning MCMC...", t0, dt(), backsn=True)

        postArgs = {
            'X': X,
            'Y': y,
            'l_scale': self.C if self.C is None else 2 * self.C
        }

        algo_res = applyMCMC(st=self.start,
                             ni=self.niter,
                             lp=self.lpost,
                             algo=self.algo,
                             postArgs=postArgs,
                             algoOpts=self.algo_options,
                             sd=self.retry_sd,
                             max_tries=self.retry_max_tries)

        self.mcmc_params = algo_res['parameters']
        self.prev_vals = algo_res['prev_vals']

        self.coef_, self.intercept_, self.extra_params_sum_ = self._create_coefs(
            self.mcmc_params, self.param_summary, self.extra_params)
        self.n_iter_ = self.niter
        self.classes_ = np.unique(y)

        if self.over_dispersion:
            self.dispersion_estimation_ = self.extra_params_sum_[-1]
        else:
            self.dispersion_estimation_ = None

        if self.verbose:
            print_time("Finished MCMC. Stored Coefficients...",
                       t0,
                       dt(),
                       backsn=True)

        return self
Example #17
    def fit(self, X, y):
        """Fit the model

        Parameters
        ----------
        X : numpy array
            The feature (or design) matrix.
        y : numpy array
            The response variable.

        Returns
        -------
        self
            Updates internal attributes, such as `coef_` and `intercept_`.

        """
        t0 = dt()
        X = check_array(X,
                        force_all_finite='allow-nan',
                        estimator=self,
                        copy=True)
        if not self.has_constant:
            X = add_constant(X, prepend=True)

        if self.start is not None:
            pass
        else:
            if self.verbose:
                print_time("Initializing Coefficients...",
                           t0,
                           dt(),
                           backsn=True)
            if self.initialize_weights == 'sklearn':
                C = self.C
                if C is None:
                    C = 1
                try:
                    from sklearn.linear_model import PoissonRegressor as glm_pois_sk
                    mod = glm_pois_sk(alpha=1 / C,
                                      fit_intercept=False,
                                      max_iter=1000).fit(X, y)
                    self.start = mod.coef_.reshape(-1, )
                except ImportError:
                    print(
                        'Older sklearn, no PoissonRegressor. Using statsmodels instead'
                    )
                    mod = glm_pois(y, X, family=Poisson()).fit()
                    self.start = mod.params.reshape(-1, )
                if self.extra_params > 0:
                    self.start = np.concatenate(
                        (self.start, np.repeat(1, self.extra_params)))
            elif self.initialize_weights == 'ones':
                self.start = np.ones(shape=X.shape[1] + self.extra_params)
            elif self.initialize_weights == 'random':
                self.start = np.random.normal(size=(X.shape[1] +
                                                    self.extra_params, ))
            else:
                self.start = np.zeros(shape=X.shape[1] + self.extra_params)

        if self.verbose:
            print_time("Beginning MCMC...", t0, dt(), backsn=True)

        postArgs = {
            'X': X,
            'Y': y,
            'l_scale': self.C if self.C is None else 2 * self.C
        }

        algo_res = applyMCMC(st=self.start,
                             ni=self.niter,
                             lp=self.lpost,
                             algo=self.algo,
                             postArgs=postArgs,
                             algoOpts=self.algo_options,
                             sd=self.retry_sd,
                             max_tries=self.retry_max_tries)

        self.mcmc_params = algo_res['parameters']
        self.prev_vals = algo_res['prev_vals']

        self.coef_, self.intercept_, self.extra_params_sum_ = self._create_coefs(
            self.mcmc_params, self.param_summary, self.extra_params)
        self.n_iter_ = self.niter

        # get model summaries
        if self.over_dispersion:
            self.dispersion_delta_ = self.extra_params_sum_[-1]
            self.dispersion_estimation_ = 1 / (1 - self.dispersion_delta_)**2
        else:
            self.dispersion_delta_ = 0
            self.dispersion_estimation_ = None

        ddu = self._deviance_dispersion_update(X[:, 1:], y, sample_weight=None)
        self.deviance_ = ddu['deviance_']
        self.null_deviance_ = ddu['null_deviance_']
        self.pearson_residuals_ = ddu['pearson_residuals_']
        self.pearson_chi2_ = ddu['pearson_chi2_']
        self.model_d2_ = ddu['model_d2_']
        self.df_model_ = ddu['df_model_']
        self.df_residuals_ = ddu['df_residuals_']
        self.df_total_ = ddu['df_total_']
        self.dispersion_scale_ = ddu['dispersion_scale_']
        self.dispersion_scale_sqrt_ = ddu['dispersion_scale_sqrt_']

        return self
Example #18
        continue
    else:
        break
# readjust to 20 if the maximum is exceeded
opportunity = min(opportunity, 20)

# ======================================
# == multiplication-table (gugudan) ====
# ======================================
count: int = 0
probs: list[str] = []
correctAns: int = 0
wrongAns: int = 0
timeout: int = 3
# start the timer for the whole test
initialtime = dt()
st: float = 0.0
# now we can simply take the first n entries
print("Quiz start:", end="")
while count < opportunity:
    # just pull the problems one by one from the front
    realAns = (googoolist[count][0] * googoolist[count][1])
    # start the per-question timer
    st = dt()
    try:
        # read the input
        inp = int(input("%dX%d = " % googoolist[count]))
        # the answer is correct, and...
        if (inp == realAns):
            # ...it was solved within the time limit, so count it as correct
            if ((dt() - st) < timeout):
Example #19
                   num_iterations=n_iters,
                   optimizer='adam',
                   optimizer_args={
                       'learning_rate': 0.001,
                       'beta1': 0.9,
                       'beta2': 0.999,
                       'eps': 1e-8
                   },
                   m_scale=1,
                   bn_tol=1e-6,
                   bn_momentum=0,
                   shuffle=False,
                   print_cost=True,
                   random_state=42)

t0 = dt()
nn = nn.fit(X_train, Y_train_oh)
t1 = dt()

print('tsdst Runtime (s):', t1 - t0)

#################################################################
################### Comparison With Keras #######################
#################################################################

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, BatchNormalization
from tensorflow.keras.regularizers import l1, l2, l1_l2
from tensorflow.keras.initializers import RandomUniform, TruncatedNormal
from tensorflow.keras.optimizers import Adam
Example #20
def latinHypercube1D(data,
                     sampleSize,
                     random_state=None,
                     shuffle_after=True,
                     sort_=True,
                     sort_method="quicksort",
                     sort_cols=None,
                     stratified=True,
                     bin_placement="random",
                     verbose=False):
    """
    Creates a sample from a Dataframe (data). If replace=True,
    this function can be used for bootstrap samples. 

    Parameters
    ----------
    data : numpy array or pandas dataframe
        The design or feature matrix (with response).
    sampleSize : int
        The number of samples to return.
    random_state : int, optional
        The random seed of the process. The default is None.
    shuffle_after : bool, optional
        Shuffle the results after being sampled. The default is True.
    sort_ : bool, optional
        Sort the data (only set to False if sending presorted data).
        The default is True.
    sort_method : str, optional
        Numpy sort method. The default is "quicksort".
    sort_cols : list, optional
        The columns to include in the sorting. The default is None.
    stratified : bool, optional
        Create a stratified sample. The default is True.
    bin_placement : str, optional
        Method for placing the edges on the sampling bins.
        The default is "random".
    verbose : bool, optional
        Print the results of the process. The default is False.

    Raises
    ------
    ValueError
        Raised if invalid bin_placement is passed.

    Returns
    -------
    LHC : pandas dataframe or numpy array
        The samples.

    """
    t0 = None
    if verbose:
        # Initialize time for printing
        t0 = dt()
        print_time("\nInitializing...", t0, te=dt())

    # Initialize
    sortedData = None
    df = False

    # Check if DataFrame for convenience later
    if isinstance(data, pd.DataFrame):
        df = True

    if sort_:
        if verbose:
            print_time("\nSorting...", t0, te=dt())

        # Convert to DataFrame, if not already (for easy
        # column sorting control)
        # TODO: see if column control could be equally as easy without
        # converting to DF
        if df:
            sortedData = data.copy(deep=True)
        else:
            sortedData = pd.DataFrame(data)

        if sort_cols is not None:
            sortedData = sortedData.sort_values(sort_cols,
                                                axis=0,
                                                kind=sort_method)
        else:
            sortedData = sortedData.sort_values(list(sortedData.columns),
                                                axis=0,
                                                kind=sort_method)

    if random_state is not None:
        np.random.seed(random_state)

    if verbose:
        print_time("\nShaping...", t0, te=dt())

    if sortedData is not None:
        sortedData = sortedData.values.reshape(data.shape[0], -1)
    else:
        sortedData = np.array(data).reshape(data.shape[0], -1)

    rows = sortedData.shape[0]
    cols = sortedData.shape[1]
    # sortedData is a numpy array here, so use .dtype (not pandas .dtypes)
    LHC = np.zeros(shape=(sampleSize, cols), dtype=sortedData.dtype)
    splits = sampleSize

    if verbose:
        print_time("\nCreating the bins...", t0, te=dt())

    if stratified:
        high = int(np.ceil(rows / sampleSize))
        low = int(np.floor(rows / sampleSize))
        rem = rows % sampleSize
        if rem != 0:
            rem2 = sampleSize - rem
            if bin_placement == "random":
                f_array = np.repeat((high, low), (rem, rem2))
                np.random.shuffle(f_array)
            elif bin_placement == "spaced":
                if rem > rem2:
                    r1 = np.repeat(high, rem)
                    start = 1
                    end = rem + 1
                    a1 = np.arange(start,
                                   end,
                                   np.floor(rem / rem2),
                                   dtype=int)[:rem2]
                    f_array = np.insert(r1, a1, low)
                else:
                    r1 = np.repeat(low, rem2)
                    start = 1
                    end = rem2 + 1
                    a1 = np.arange(start,
                                   end,
                                   np.floor(rem2 / rem),
                                   dtype=int)[:rem]
                    f_array = np.insert(r1, a1, high)
            else:
                raise ValueError(
                    "Not a valid bin placement. Change to 'spaced' or "
                    "'random'. To order the bins from high to low, change "
                    "stratified to False")
        else:
            f_array = np.repeat(high, sampleSize)
        splits = np.cumsum(f_array)[:-1]
    Splits = np.array_split(sortedData, splits)

    if verbose:
        print_time("\nSampling...", t0, te=dt())

    t1 = dt()
    for i, sample in enumerate(Splits):
        LHC[i, :] = sample[np.random.choice(sample.shape[0], 1)]
        if verbose:
            updateProgBar(i + 1, sampleSize, t1)

    if shuffle_after:
        if verbose:
            print_time("\nShuffling...", t0, te=dt())
        np.random.shuffle(LHC)

    if df:
        if verbose:
            print_time("\nConverting to DataFrame...", t0, te=dt())
        LHC = pd.DataFrame(LHC, columns=data.columns)

    if verbose:
        print_time("\nFinished...", t0, te=dt())
    return LHC
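A small usage sketch for latinHypercube1D (assuming the function is importable):

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': np.random.randn(100), 'b': np.random.rand(100)})
sample = latinHypercube1D(df, sampleSize=10, random_state=0)
print(sample.shape)  # (10, 2)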
Example #21
    def RunFullAnalysis(self,
                        freshStart=True,
                        downsample=False,
                        dropCols=None,
                        dropCor=True,
                        corr_cutoff=0.9,
                        dropVar=True,
                        dropVarTol=0.001,
                        cv_iterations=5,
                        random_state=123,
                        default_penalty=0.01,
                        verbose=True,
                        try_parallel=True,
                        for_stride=2,
                        reduce_features_by=30,
                        num_cs=100,
                        red_metric='bic',
                        red_log_low=-5,
                        red_log_high=1,
                        n_jobs=1,
                        remove_msg=True,
                        chunk=False):
        """
        Performs a quick analysis by first considering which features/variables
        may be uninformative predictors based on preliminary models. Then,
        it performs a very basic forward selection procedure to limit
        predictors in the final model. Currently, this only supports
        Logistic Regression.

        Parameters
        ----------
        freshStart : bool, optional
            Whether to use a previous result, or start from scratch.
            The default is True.
        downsample : bool, optional
            Whether or not to downsample the majority class.
            The default is False.
        dropCols : list, optional
            List of columns to initially drop. The default is None.
        dropCor : bool, optional
            Drop variables with high correlations. The default is True.
        corr_cutoff : float, optional
            Cutoff for removing high correlation variables. Anything below the
            cutoff is kept. The default is 0.9.
        dropVar : bool, optional
            Drop variables with very low variance.
            Does nothing if data is standardized. The default is True.
        dropVarTol : float, optional
            Cutoff value for low variance. The default is 0.001.
        cv_iterations : int, optional
            The number of cross validation steps. The default is 5.
        random_state : int, optional
            The random seed for the process. The default is 123.
        default_penalty : float, optional
            The penalty term to be applied to models where the penalty is
            otherwise unspecified. The default is 0.01.
        verbose : bool, optional
            Print steps as they complete. The default is True.
        try_parallel : bool, optional
            Try to perform the operation in parallel. If it fails, it will
            continue without parallel operations. The default is True.
        for_stride : int, optional
            The gap between variables evaluated for the forward selection
            metric. The default is 2.
        reduce_features_by : int, optional
            The size of the final model. The default is 30.
        num_cs : int, optional
            The number of penalty values to test (i.e., the number of models
            to evaluate during AIC calculations). The default is 100.
        red_metric : str, optional
            The metric used to optimize model reductions. The default is 'bic'.
        red_log_low : float, optional
            The logspace minimum value for evaluating the L1 penalty.
            The default is -5.
        red_log_high : float, optional
            The logspace maximum value for evaluating the L1 penalty.
            The default is 1.
        n_jobs : int, optional
            The number of processes to attempt. The default is 1.
        remove_msg : bool, optional
            Remove messagepacks. The default is True.
        chunk : bool, optional
            Chunk msgpacks? The default is False.

        Returns
        -------
        None.

        """
        t0 = dt()
        self._full_analysis_arglist = {
            'freshStart': freshStart,
            'downsample': downsample,
            'dropCols': dropCols,
            'dropCor': dropCor,
            'corr_cutoff': corr_cutoff,
            'dropVar': dropVar,
            'dropVarTol': dropVarTol,
            'cv_iterations': cv_iterations,
            'random_state': random_state,
            'default_penalty': default_penalty,
            'try_parallel': try_parallel,
            'for_stride': for_stride,
            'reduce_features_by': reduce_features_by,
            'num_cs': num_cs,
            'red_metric': red_metric,
            'red_log_low': red_log_low,
            'red_log_high': red_log_high,
            'n_jobs': n_jobs,
            'verbose': verbose,
            'remove_msg': remove_msg,
            'chunk': chunk
        }

        self.BaselineDropProcedures(freshStart=freshStart,
                                    downsample=downsample,
                                    dropCols=dropCols,
                                    dropCor=dropCor,
                                    corr_cutoff=corr_cutoff,
                                    dropVar=dropVar,
                                    dropVarTol=dropVarTol,
                                    cv_iterations=cv_iterations,
                                    random_state=random_state,
                                    default_penalty=default_penalty,
                                    verbose=verbose,
                                    t0=t0)

        if verbose:
            print_time("\nCreating Lasso Reduction Models...", t0, te=dt())

        self.reduction = np.logspace(red_log_low, red_log_high, num_cs)
        if try_parallel:
            try_again = True
            while try_again:
                try:
                    self.calcAICs(num_cs,
                                  try_parallel,
                                  n_jobs,
                                  random_state=random_state,
                                  remove_msg=remove_msg,
                                  chunk=chunk)
                    try_again = False
                except MemoryError:
                    n_jobs = int(n_jobs - (n_jobs / 2))
                    print('\n\tReduced n_jobs to ', n_jobs)
                    if n_jobs == 1:
                        try_again = False
                    if not try_again:
                        self.calcAICs(num_cs,
                                      False,
                                      n_jobs=1,
                                      random_state=random_state,
                                      remove_msg=remove_msg,
                                      chunk=chunk)
        else:
            self.calcAICs(num_cs,
                          False,
                          n_jobs=1,
                          random_state=random_state,
                          remove_msg=remove_msg,
                          chunk=chunk)

        self.ForwardSelectionAnalysis(cv_iterations=cv_iterations,
                                      random_state=random_state,
                                      default_penalty=default_penalty,
                                      for_stride=for_stride,
                                      reduce_features_by=reduce_features_by,
                                      red_metric=red_metric,
                                      verbose=verbose,
                                      t0=t0)
Example #22
def numberOfPrime(num):
    """Count the prime numbers up to num.

    Returns:
        int: the number of the prime numbers smaller than num
    """
    currentno = 2  # check from 2
    count = 0
    # check all numbers smaller than or equal to num
    # (it seems that while is faster than for...)
    while currentno <= num:
        if isPrime(currentno):
            count = count + 1
        currentno = currentno + 1

    return count


# test code
startt = dt()  # start timer
print("prime numbers in 0-10 : %d" % numberOfPrime(10))
print("elapsed time : %.2fms" % ((dt() - startt) * 1000))
startt = dt()  # start timer
print("prime numbers in 0-100 : %d" % numberOfPrime(100))
print("elapsed time : %.2fms" % ((dt() - startt) * 1000))
startt = dt()  # start timer
print("prime numbers in 0-1000 : %d" % numberOfPrime(1000))
print("elapsed time : %.2fms" % ((dt() - startt) * 1000))
startt = dt()  # start timer
print("prime numbers in 0-10000 : %d" % numberOfPrime(10000))
print("elapsed time : %.2fms" % ((dt() - startt) * 1000))
startt = dt()  # start timer
print("prime numbers in 0-100000 : %d" % numberOfPrime(100000))
print("elapsed time : %.2fms" % ((dt() - startt) * 1000))
Beispiel #23
0
from timeit import default_timer as dt


def isprime(num):
    """Return True if num is prime, using trial division up to sqrt(num)."""
    # numbers below 2 are not prime
    if num < 2:
        return False
    i = 2
    # check every candidate divisor up to the square root of num
    while i * i <= num:
        # if num can be divided by a smaller number, it is not prime
        if num % i == 0:
            return False
        i = i + 1
    return True
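
# Not from the original source: a quick sanity check of isprime against
# the first few primes.
assert [k for k in range(20) if isprime(k)] == [2, 3, 5, 7, 11, 13, 17, 19]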


n: int = 0
primenumbers: list[int] = []
startt: float = dt()
try:
    n = int(input("type the number:"))
except ValueError:
    exit()  # if the input is not a number, just exit the program
currentno = 2  # check from 2
count = 0
# check all numbers smaller than or equal to n
while currentno <= n:
    if isprime(currentno):
        primenumbers.append(currentno)
        count = count + 1
    currentno = currentno + 1

print("Number of primes up to %d: %d" % (n, count))
Beispiel #24
0
from timeit import default_timer as dt


def numpy_call(x):
    start_time = dt()
    x *= 2.0
    rt = (dt() - start_time) * 1000
    # print("Python numpy took %d ms for %d floats" % (rt, x.size))
    return rt
Beispiel #25
0
# Authors: Jaideep Reddy, Deepika Bisht (BML Munjal University Gurgaon, India)
#
# Last Modified: 18-07-2021

# import timing, numpy, and multiprocessing libraries
from timeit import default_timer as dt
import numpy as np
from multiprocessing import Process, Array, Value

# provide the following
cores = 18  # number of cores
nodes = 4  # number of nodes
gsize = 0.5  # grid size
node = 0  # node number

st = dt()
# QgsProject is available by default inside the QGIS Python console;
# in a standalone script you would need: from qgis.core import QgsProject
veckey = list(QgsProject.instance().mapLayers().keys())[
    1]  # get the key for the map/India layer
instancelayer = QgsProject.instance().mapLayers()[
    veckey]  # use the key to get the map/India layer
key = list(
    QgsProject.instance().mapLayers().keys())[0]  # get the key for the grid layer
grid = QgsProject.instance().mapLayers()[
    key]  # use the grid key to get the grid layer


# counter() returns the number of grid boxes that contain a part of the
# coastline, i.e. that intersect with it.
def counter(gridfts, instvectors, arr, i):
    cnt = 0
    for feature in gridfts:
        # get the features of the map within the boundary of this grid feature
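
# Not part of the original (the snippet is cut off above): a minimal sketch
# of the intersection count that counter() is described as performing.
# count_intersections is a hypothetical helper, assuming grid features and
# plain QgsGeometry coastline geometries, not the original implementation.
def count_intersections(grid_features, coast_geometries):
    cnt = 0
    for feature in grid_features:
        box = feature.geometry()
        # a grid box counts if it intersects any coastline geometry
        if any(box.intersects(g) for g in coast_geometries):
            cnt += 1
    return cnt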
Beispiel #26
0
from timeit import default_timer as dt

primenumbers: list[int] = []  # primes found so far


def isprime_imp(num):
    """Trial division that reuses the primes found so far."""
    if num < 2:
        return False
    pn = 1  # fallback when no primes are known yet
    # 1st check: divide by the known prime numbers up to root(num)
    for pn in primenumbers:
        if pn > num ** 0.5:
            break  # exit the loop once all relevant primes are checked
        if num % pn == 0:
            return False

    # 2nd check: divide by the numbers between the largest known prime
    # and root(num)
    for nn in range(pn + 1, int(num ** 0.5) + 1):
        if num % nn == 0:
            return False
    # the number is prime, so add it to the list
    primenumbers.append(num)
    return True


startt = dt()  # start timer
n: int = 0
try:
    n = int(input("type the number:"))
except ValueError:
    exit()  # if the input is not a number, just exit the program
currentno = 2  # check from 2
count = 0
# check all numbers smaller than or equal to n
while currentno <= n:
    if isprime_imp(currentno):
        count = count + 1
    currentno = currentno + 1

print("Number of primes up to %d: %d" % (n, count))
print("List:", primenumbers)
Beispiel #27
0
from timeit import default_timer as dt
from ctypes import c_long, c_void_p

import numpy as np

# x (a contiguous float32 numpy array) and double_me_lib (the loaded
# shared library) are assumed to be defined earlier in this snippet,
# which is cut off here.
H = c_long(x.shape[0])
W = c_long(x.shape[1])
P = x.ctypes.data_as(c_void_p)  # The C pointer

# The C function header is:
# void double_me(void *buffer, const int W, const int H);

# Let's call the in-place C function:
double_me_lib.double_me(P, W, H)

# The operation is in place, therefore the result is visible in the same array.
print("Same array after C code:\n", x)

# Now let's time it

x = np.random.randn(10000, 10000).astype(np.float32)
start_time = dt()
x = np.ascontiguousarray(x)
print("Making the array contiguous took %d ms for %d floats" %
      ((dt() - start_time) * 1000, x.size))


def numpy_call(x):
    start_time = dt()
    x *= 2.0
    rt = (dt() - start_time) * 1000
    # print("Python numpy took %d ms for %d floats" % (rt, x.size))
    return rt


def opencv_call(x):
    start_time = dt()
Beispiel #28
0
def forwardSelection(XY, target_var, model, metric='bic', family='gaussian',
                     verbose=True, n_jobs=1, early_stop=False, perc_min=0.01,
                     stop_at_p=1000, stop_when=5, use_probabilities=False,
                     return_type='all', serialize_flavor='feather',
                     use_threads=True):
    """
    A forward selection algorithm; currently supports classification only
    and still needs some work.

    Note: this function is known to fail when n_jobs > 1 while running
    under Spyder, because of an issue between Spyder and the p_prog_simp
    function. I have not been able to discover why, or to provide a fix.
    If you run a script with this function from the console, it should run
    just fine.

    Parameters
    ----------
    XY : pandas dataframe
        The combined independent variables/features and response/target
        variable.
    target_var : str
        The column containing the target (or response) variable.
    model : sklearn, or similar
        An unfitted model. Any model that has a fit and predict method.
    metric : str
        The AIC metric to be used, for example, aic, aicc, bic, ebic, hastie, 
        or kwano.
    family : str
        The family of distributions to calculate log-likelihood from.
    verbose : bool, optional
        Output the steps and progress as it completes. The default is True.
    n_jobs : int, optional
        If greater than 1, perform the operation in parallel. The default is 1.
    early_stop : bool, optional
        If True, allow the search to stop after `stop_at_p` selections.
        The default is False.
    perc_min : float, optional
        --NOT IMPLEMENTED--. The percent-change minimum for breaking early
        if no meaningful change in the metric is detected. The default is 0.01.
    stop_at_p : int, optional
        The number of selections to stop at. In other words, it selects up to
        `p` predictors, even if the optimal model is not yet found and more
        tests could be done. The default is 1000.
    stop_when : int, optional
        Stop the operations when the metric no longer continues to decrease,
        after evaluating the next stop_when columns. The default is 5.
    use_probabilities : bool, optional
        Whether or not to use predicted probabilities as the only other feature
        in the set, or to use the actual features in performing the forward 
        selection. The default is False.
    return_type : str, optional
        Which object to return, can be either list, model, data, or all.
        The default is 'all'.
    serialize_flavor : str
        Which mode of saving data to use; currently supports 'feather' and
        'msgpack'. Unfortunately, as of pandas 0.25.0, msgpack is no longer
        supported. Note: using feather requires pyarrow.
    use_threads : bool, optional
        Use threads instead of processes for the parallel operation.

    Returns
    -------
    list, model, data, or tuple of objects
        Either return as list, model, data, or all. The default is 'all'.

    """
    
    t0 = dt()

    # with p predictors, a full forward pass fits p + (p-1) + ... + 1 models
    p = XY.shape[1] - 1
    total_possible_complexity = int(p * (p + 1) / 2)

    if early_stop:
        if stop_at_p >= p:
            complexity = total_possible_complexity
            stop_at_p = p
        else:
            complexity = int(np.sum(p - np.arange(0, stop_at_p)))
    else:
        complexity = total_possible_complexity
        stop_at_p = p
    
    
    if verbose:
        print_time("Problem Complexity: " + str(complexity) + " Iterations Needed, " +
                   str(total_possible_complexity) + " Possible...",
                   t0, te=dt(), backsn=True)
    
    # Use this for an exhaustive search
    #all_combos = list(powerset(X.columns))
    final_mod_cols = []
    leftover_cols = [x for x in XY.columns if x != target_var]
    currentScore = np.inf
    #early_stop_counter = 0
    loop_counter = 0
    
    if n_jobs > 1:
        if verbose:
            print_time("\nPreparing Parallel Operation...", t0, te=dt())
            
        save_name, save_path, save_ext, num_chunks = _prepare_for_parallel(XY)
        arg = {'save_path': save_path,
               'save_name': save_name,
               'save_ext': save_ext,
               'metric': metric,
               'model': model,
               'target_var': target_var,
               'family': family,
               'use_probabilities': use_probabilities,
               'mod_cols': final_mod_cols,
               'Yprob': None
              }
    
    for i in range(XY.shape[1] - 1):
        scores = []
        if use_probabilities and i > 1:
            # create current model and probability array
            initial_fit = model.fit(XY.loc[:, final_mod_cols], XY.loc[:, target_var])
            Yprob = initial_fit.predict_proba(XY.loc[:, final_mod_cols])[:, 1]
            #print('\nprob gen step: ', Yprob.shape, "\n")
            if n_jobs > 1:
                arg['Yprob'] = Yprob
        
        if n_jobs > 1:
            if verbose:
                print_time("\nPerforming " + str(i + 1) + " of " + str(stop_at_p) + " Steps...",
                           t0, te=dt(), backsn=True)
            loop_arg = [{'new_predictor': [col]} for col in leftover_cols]
            scores = p_prog_simp(arg, loop_arg, _doParallelForward, n_jobs,
                                 use_threads=use_threads)
            #return scores
            if verbose:
                loop_counter += len(loop_arg)
                updateProgBar(loop_counter, complexity, t0)
        else:
            for k, col in enumerate(leftover_cols):
                if use_probabilities and i > 1:
                    #print('loop step: ', XY.loc[:, col].values.shape, "\n")
                    XY_prob = pd.DataFrame({'Yprob': Yprob, col: XY.loc[:, col].values}, index=XY.index)
                    score = _calcForwardAICs(XY_prob, XY.loc[:, target_var],
                                             model, metric, family)
                else:
                    score = _calcForwardAICs(XY.loc[:, final_mod_cols + [col]], XY.loc[:, target_var],
                                             model, metric, family)
                scores.append(score)
            
                if verbose:
                    loop_counter += 1
                    updateProgBar(loop_counter, complexity, t0)
        
        minScore = np.min(scores)
        if minScore > currentScore:
            print("\nNo further imporovement in the model. Breaking...")
            break
        else:
            currentScore = minScore
        
        minScoreLoc = int(np.argmin(scores))
        final_mod_cols.append(leftover_cols[minScoreLoc])
        del leftover_cols[minScoreLoc]
        
        # could be useful if I wanted to do something like accuracy,
        # but for AIC/BIC, this isn't necessary
        #if np.abs(percentIncrease(minScore, prev_minScore)) < perc_min:
        #    early_stop_counter += 1
        #else:
        #    early_stop_counter = 0
        #
        #if early_stop_counter >= stop_when:
        #    print('Stopped Early')
        #    break
        #if i >= stop_at_p:
        #    print('Stopped at ', str(i), ' selected predictors')
        #    break

    if n_jobs > 1:
        file_ = save_path + save_name + save_ext
        try:
            os.remove(file_)
        except FileNotFoundError:
            print('Could not find file', file_, '- continuing...')
            
    if return_type == 'list':
        return final_mod_cols
    elif return_type == 'model':
        return model.fit(XY.loc[:, final_mod_cols], XY.loc[:, target_var])
    elif return_type == 'data':
        return XY.loc[:, final_mod_cols]
    else:
        return final_mod_cols, model.fit(XY.loc[:, final_mod_cols],
                                         XY.loc[:, target_var]), XY.loc[:, final_mod_cols]
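
# Not from the original source: a minimal usage sketch with synthetic data.
# The column names and model are hypothetical, forwardSelection is assumed
# to be importable from this module, and family='binomial' assumes the
# library accepts that name for a classification likelihood.
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
XY = pd.DataFrame(rng.normal(size=(200, 4)),
                  columns=['x1', 'x2', 'x3', 'x4'])
XY['y'] = (XY['x1'] + 0.5 * XY['x2'] > 0).astype(int)

selected = forwardSelection(XY, target_var='y', model=LogisticRegression(),
                            metric='bic', family='binomial',
                            verbose=False, return_type='list')
print(selected)  # e.g. ['x1', 'x2']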
Beispiel #29
0
import numpy as np
from sklearn.preprocessing import StandardScaler
from timeit import default_timer as dt

begin = dt()
print('1')
train = np.load('/A/VSE/data/coco_tensor_precomp/train_ims.npy')
end = dt() - begin
print('Done loading in %.2fs' % end)
train = train.mean(-1).mean(-1)
scaler = StandardScaler()
train = scaler.fit_transform(train)
print(train.shape)
print('2')
valid = np.load('/A/VSE/data/coco_tensor_precomp/val_ims.npy')
valid = valid.mean(-1).mean(-1)
valid = scaler.transform(valid)
print(valid.shape)
print('3')
test = np.load('/A/VSE/data/coco_tensor_precomp/test_ims.npy')
test = test.mean(-1).mean(-1)
test = scaler.transform(test)
print(test.shape)
print('4')

ftest = np.load('/A/VSE/data/f30k_tensor_precomp/test_ims.npy')
ftest = ftest.mean(-1).mean(-1)
ftest = scaler.transform(ftest)
print(ftest.shape)
print('5')
Beispiel #30
0
def main(args):
    global Driver_Instance

    rospy.init_node("autonomous", anonymous=True)

    with open("./config/config.yaml") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    Driver_Instance = Driver(gainL=args.GAIN_L, gainA=args.GAIN_A)
    LRS_Instance = LRS(Driver_Instance, clean=args.fresh)

    # Subscribe to relevant topics and services
    rospy.Subscriber(config["odometry_topic"], Odometry, route)

    # Wait for the odometry topic to be available
    rospy.loginfo("Waiting for topics to come online...")
    rospy.rostime.wallsleep(0.1)
    rospy.wait_for_message(config["odometry_topic"], Odometry, timeout=10)
    rospy.loginfo("OK!")

    try:
        # ROS Loop
        rospy.loginfo("Running the control loop")
        while not rospy.core.is_shutdown():

            if Driver_Instance.flag == DriverStatus.HALT:
                try:
                    ans = input("Execute? [y/n]")
                    if ans == "n":
                        exit(0)
                except EOFError:
                    print("Invalid input")
                    continue
                Driver_Instance.flag = DriverStatus.NORMAL

            start = dt()
            px, py, _ = Driver_Instance.get_current_loc()

            if LRS_Instance.has_landmarks():
                AR = LRS_Instance.dictionary
                valid = 0
                markers = []

                for m_id, m in AR.items():
                    if m_id == 0 or m_id == 1:
                        valid += 1
                        markers.append(m.loc())

                if valid != 2:
                    continue

                x, y = get_next((px, py), markers, scale=2, tol=0.8)

                rospy.logwarn(
                    "\nx={:.2f} :: y={:.2f} :: in {:.2f} ms\n".format(
                        x, y, 1000 * (dt() - start)))
                Driver_Instance.go_to_position(x, y)

            rospy.rostime.wallsleep(0.1)
    except (Exception, rospy.ROSException, KeyboardInterrupt):
        exc_type, exc_value, exc_traceback = sys.exc_info()
        rospy.logfatal("Program crashed or halted")
        traceback.print_exception(exc_type,
                                  exc_value,
                                  exc_traceback,
                                  limit=2,
                                  file=sys.stdout)
        rospy.core.signal_shutdown("exited")
        exit(1)