Code example #1
    def get_debug_page(self, status_code, response_headers, original_response, stages_total):

        import frontik.app

        start_time = time.time()
        self.debug_log_handler.flush()

        debug_log_data = copy.deepcopy(self.debug_log_handler.log_data)
        debug_log_data.set('code', str(status_code))
        debug_log_data.set('mode', ','.join(self.debug_mode.mode_values))
        debug_log_data.set('started', _format_number(self.handler.request._start_time))
        debug_log_data.set('request-id', str(self.handler.request_id))
        debug_log_data.set('stages-total', _format_number(stages_total))

        if hasattr(self.handler.config, 'debug_labels') and isinstance(self.handler.config.debug_labels, dict):
            debug_log_data.append(frontik.xml_util.dict_to_xml(self.handler.config.debug_labels, 'labels'))

        try:
            debug_log_data.append(E.versions(
                etree.tostring(frontik.app.get_frontik_and_apps_versions(self.handler.application), encoding=unicode)
            ))
        except Exception:
            debug_log.exception('cannot add version information')
            debug_log_data.append(E.versions('failed to get version information'))

        debug_log_data.append(E.request(
            E.method(self.handler.request.method),
            _params_to_xml(self.handler.request.uri, self.handler.log),
            _headers_to_xml(self.handler.request.headers),
            _cookies_to_xml(self.handler.request.headers)
        ))

        debug_log_data.append(E.response(_headers_to_xml(response_headers)))

        if getattr(self.handler, "_response_size", None) is not None:
            debug_log_data.set("response-size", str(self.handler._response_size))

        if original_response is not None:
            debug_log_data.append(frontik.xml_util.dict_to_xml(original_response, 'original-response'))

        debug_log_data.set('generate-time', _format_number((time.time() - start_time) * 1000))

        # return raw xml if this is specified explicitly (noxsl=true) or when in inherited mode
        if frontik.util.get_cookie_or_url_param_value(self.handler, 'noxsl') is None and not self.debug_mode.inherited:
            try:
                transform = etree.XSLT(etree.parse(self.DEBUG_XSL))
                log_document = str(transform(debug_log_data))
                self.handler.set_header('Content-Type', 'text/html; charset=UTF-8')
            except Exception:
                self.handler.log.exception('XSLT debug file error')
                try:
                    self.handler.log.error('XSL error log entries:\n%s' % "\n".join(map(
                        'File "{0.filename}", line {0.line}, column {0.column}\n\t{0.message}'
                        .format, transform.error_log)))
                except Exception:
                    pass

                self.handler.set_header('Content-Type', 'application/xml; charset=UTF-8')
                log_document = etree.tostring(debug_log_data, encoding='UTF-8', xml_declaration=True)
        else:
            self.handler.set_header('Content-Type', 'application/xml; charset=UTF-8')
            log_document = etree.tostring(debug_log_data, encoding='UTF-8', xml_declaration=True)

        return log_document
Code example #2
    def fit_curve(self, lane_pts):     
        self.not_left = False
        self.not_right = False

        x_left, y_left, x_right, y_right = self.preprocess_pts(lane_pts)

        # Reset when the number of y points (here, y is the lateral range) is 0
        if len(y_left) <= 10 or len(y_right) <= 10:
            if len(y_left) == 0:
                # print('y_left point does not exist')
                self.not_left = True
            else:
                # print("y_right point does not exist")
                self.not_right = True
            self._init_model()
            x_left, y_left, x_right, y_right = self.preprocess_pts(lane_pts)

        # Automatically reset when the computed lane width is too narrow
        if self.lane_width < 0.01:
            # print("Too Short lane width")
            self._init_model()
            x_left, y_left, x_right, y_right = self.preprocess_pts(lane_pts)
        
        X_left = np.stack([x_left**i for i in reversed(range(1, self.order+1))]).T
        X_right = np.stack([x_right**i for i in reversed(range(1, self.order+1))]).T

        if y_left.shape[0]>=self.ransac_left.min_samples:
            self.ransac_left.fit(X_left, y_left)

        if y_right.shape[0]>=self.ransac_right.min_samples:
            self.ransac_right.fit(X_right, y_right)

        x_pred = np.arange(0, self.x_range, self.dx).astype(np.float32)
        X_pred = np.stack([x_pred**i for i in reversed(range(1, self.order+1))]).T

        y_pred_l = self.ransac_left.predict(X_pred)
        y_pred_r = self.ransac_right.predict(X_pred)
        
        if y_left.shape[0]>=self.ransac_left.min_samples and y_right.shape[0]>=self.ransac_right.min_samples:
            # if self.lane_width < self.init_width - self.init_width/2:
            #     # print("Too Short lane width")
            #     self.lane_width = self.init_width
            # else:
            #     self.lane_width = np.mean(y_pred_l - y_pred_r)
            self.lane_width = np.mean(y_pred_l - y_pred_r)

        if y_left.shape[0]<self.ransac_left.min_samples:
            y_pred_l = cp.deepcopy(y_pred_r + self.lane_width)
        
        if y_right.shape[0]<self.ransac_right.min_samples:
            y_pred_r = cp.deepcopy(y_pred_l - self.lane_width)

        # # If the mean distance between the predicted y values is below 0.1, force the lanes apart
        # if1 , if2 = False, False
        # if np.mean(np.abs(y_pred_l - y_pred_r)) <= 0.1:
        #     if1 = True if y_pred_l.all() < 0 and y_pred_r.all() < 0 else False
        #     if2 = True if y_pred_l.all() > 0 and y_pred_r.all() > 0 else False
        #     # If both values are greater than 0, the lane is on the left
        #     if if1:
        #         y_pred_l = cp.deepcopy(y_pred_l + self.init_width)
        #     # If both values are less than 0, the lane is on the right
        #     if if2:
        #         y_pred_r = cp.deepcopy(y_pred_r - self.init_width)

        if np.mean(np.abs(y_pred_l - y_pred_r)) <= 0.1:
            # If both values are greater than 0, the lane is on the left
            if y_pred_l[5] < 0 and y_pred_r[5] < 0:
                y_pred_l = y_pred_l + self.init_width
            # If both values are less than 0, the lane is on the right
            if y_pred_l[5] > 0 and y_pred_r[5] > 0:
                y_pred_r = y_pred_r - self.init_width

        # if self.not_left:
        #     y_pred_l = cp.deepcopy(y_pred_r)
        #     y_pred_r = y_pred_r - self.init_width
        # elif self.not_right:
        #     y_pred_r = cp.deepcopy(y_pred_l)
        #     y_pred_l = y_pred_l + self.init_width

        # print("lane: {:.2f} | notleft: {:s} | notright: {:s}".format(self.lane_width, str(self.not_left), str(self.not_right)))
        print("lane: {:.2f}".format(self.lane_width))
        # print("Over 0: {:s} | Under 0: {:s}".format(str(if1), str(if2)))

        return x_pred, y_pred_l, y_pred_r
Code example #3
File: LOSResult.py  Project: mburger-stsci/nexoclom
    def determine_source_from_data(self, scdata):
        # Search for unfitted outputfiles
        self.fitted = True
        self.inputs.options.fitted = False
        unfit_outid, unfit_outputfiles, unfit_npackets, _ = self.inputs.search()
        if unfit_npackets == 0:
            raise RuntimeError('No packets found for these Inputs.')
        else:
            self.unfit_outid, self.unfit_outputfiles = unfit_outid, unfit_outputfiles
            self.inputs.options.fitted = True
            
            self.inputs.spatialdist = SpatialDist({'type': 'fitted output'})
            self.inputs.spatialdist.query = scdata.query
            
            self.inputs.speeddist= SpeedDist({'type': 'fitted output'})
            self.inputs.speeddist.query = scdata.query

        data = scdata.data
        iteration_results = []
        dist_from_plan = self._data_setup(data)

        # Determine which points should be used for the fit
        _, _, mask = mathMB.fit_model(data.radiance, None, data.sigma,
                                      masking=self.masking, mask_only=True,
                                      altitude=data.alttan)

        for ufit_id, ufit_out in zip(self.unfit_outid, self.unfit_outputfiles):
            # Search for fitted outputfiles
            self.inputs.spatialdist.unfit_outid = ufit_id
            self.inputs.speeddist.unfit_outid = ufit_id
            self.outid, self.outputfiles, _, _ = self.inputs.search()
            assert len(self.outid) <= 1
            
            # Search for completed fitted models
            search_results = self.search()
            if len(self.outid) == 1:
                search_result = search_results.get(self.outputfiles[0], None)
            else:
                search_result = None
            
            if search_result is None:
                output = Output.restore(ufit_out)
                
                packets = copy.deepcopy(output.X)
                packets['radvel_sun'] = (packets['vy'] +
                                         output.vrplanet.to(self.unit / u.s).value)

                self.oedge = output.inputs.options.outeredge * 2
                
                # Will base shadow on line of sight, not the packets
                out_of_shadow = np.ones(packets.shape[0])
                self.packet_weighting(packets, out_of_shadow, output.aplanet)
                
                # This sets limits on regions where packets might be
                tree = self._tree(packets[xcols].values)
                
                # rad = modeled radiance
                # weighting = list of the weights that should be applied
                #   - Final weighting for each packet is mean of weights
                rad = pd.Series(np.zeros(data.shape[0]), index=data.index)
                ind0 = packets.Index.unique()
                weight_info = {
                    'saved_packets': pd.Series((np.ndarray((0,), dtype=int)
                                                for _ in range(data.shape[0])),
                                               index=data.index),
                    'weighting': pd.Series((np.ndarray((0,))
                                            for _ in range(ind0.shape[0])),
                                           index=ind0),
                    'included': pd.Series((np.ndarray((0,), dtype=int)
                                           for _ in range(ind0.shape[0])),
                                          index=ind0)}
                    
                print(f'{data.shape[0]} spectra taken.')
                for i, spectrum in data.iterrows():
                    rad_, _ = self._spectrum_process(spectrum, packets, tree,
                                                     dist_from_plan[i],
                                                     find_weighting=mask[i],
                                                     i=i, weight_info=weight_info)
                    rad.loc[i] = rad_
                    
                    if len(data) > 10:
                        ind = data.index.get_loc(i)
                        if (ind % (len(data) // 10)) == 0:
                            print(f'Completed {ind + 1} spectra')
                    
                assert np.all(weight_info['weighting'].apply(len) ==
                              weight_info['included'].apply(len))

                # Determine the proper weightings
                new_weight = weight_info['weighting'].apply(
                    lambda x:x.mean() if x.shape[0] > 0 else 0.)
                new_weight /= new_weight[new_weight > 0].mean()
                assert np.all(np.isfinite(new_weight))
                
                if np.any(new_weight > 0):
                    multiplier = new_weight.loc[output.X['Index']].values
                    output.X.loc[:, 'frac'] = output.X.loc[:, 'frac'] * multiplier
                    output.X0.loc[:, 'frac'] = output.X0.loc[:, 'frac'] * new_weight
                    
                    output.X = output.X[output.X.frac > 0]
                    output.X0 = output.X0[output.X0.frac > 0]
                    output.totalsource = output.X0['frac'].sum() * output.nsteps
                    
                    # Save the fitted output
                    output.inputs = self.inputs
                    output.save()
                    
                    # Find the radiance again with the new output
                    packets = copy.deepcopy(output.X)
                    packets['radvel_sun'] = (packets['vy'] +
                                             output.vrplanet.to(self.unit / u.s).value)
                    out_of_shadow = np.ones(packets.shape[0])
                    self.packet_weighting(packets, out_of_shadow, output.aplanet)
                    tree = self._tree(packets[xcols].values)
                    rad = pd.Series(np.zeros(data.shape[0]), index=data.index)

                    for i, spectrum in data.iterrows():
                        rad_, _ = self._spectrum_process(spectrum, packets, tree,
                                                         dist_from_plan[i],
                                                         find_weighting=False,
                                                         i=i)
                        rad.loc[i] = rad_
    
                        if len(data) > 10:
                            ind = data.index.get_loc(i)
                            if (ind % (len(data) // 10)) == 0:
                                print(f'Completed {ind + 1} spectra')

                    # Save the starting state for making a source map
                    # longitude = np.append(longitude, output.X0.longitude)
                    # latitude = np.append(latitude, output.X0.latitude)
                    # vel_ = np.sqrt(output.X0.vx**2 + output.X0.vy**2 +
                    #                output.X0.vz**2) * self.inputs.geometry.planet.radius
                    # velocity = np.append(velocity, vel_)
                    # weight = np.append(weight, output.X0.frac)
                    
                    iteration = {'radiance': rad,
                                 'npackets': output.X0.frac.sum(),
                                 'totalsource': output.totalsource,
                                 'outputfile': output.filename,
                                 'out_idnum': output.idnum,
                                 'unfit_outputfile': ufit_out,
                                 'unfit_outid': ufit_id}
                    iteration_result = FittedIterationResult(iteration)
                    iteration_result.saved_packets = weight_info['saved_packets']
                    iteration_result.weighting = weight_info['weighting']
                    iteration_result.included = weight_info['included']
                else:
                    iteration = {'radiance': rad,
                                 'npackets': 0.,
                                 'totalsource': 0.,
                                 'outputfile': output.filename,
                                 'out_idnum': output.idnum,
                                 'unfit_outputfile': ufit_out,
                                 'unfit_outid': ufit_id}
                    iteration_result = FittedIterationResult(iteration)
                    iteration_result.saved_packets = weight_info['saved_packets']
                    iteration_result.weighting = weight_info['weighting']
                    iteration_result.included = weight_info['included']

                modelfile = self.save(iteration_result)
                iteration_result.modelfile = modelfile
                iteration_results.append(iteration_result)
                del output
            else:
                # Restore saved result
                print(f'Using saved file {search_result[2]}')
                iteration_result = self.restore(search_result)
                assert len(iteration_result.radiance) == len(data)
                iteration_result.model_idnum = search_result[0]
                iteration_result.modelfile = search_result[2]
                iteration_results.append(iteration_result)

        # Combine iteration_results into single new result
        self.modelfiles = {}
        for iteration_result in iteration_results:
            self.radiance += iteration_result.radiance
            self.totalsource += iteration_result.totalsource
            self.modelfiles[iteration_result.outputfile] = iteration_result.modelfile
        
        self.outputfiles = self.modelfiles.keys()
        model_rate = self.totalsource/self.inputs.options.endtime.value
        self.atoms_per_packet = 1e23 / model_rate
        self.radiance *= self.atoms_per_packet/1e3*u.kR
        self.determine_source_rate(scdata)
        self.atoms_per_packet *= self.sourcerate.unit
        self.outputfiles = list(self.modelfiles.keys())

        print(self.totalsource, self.atoms_per_packet)
Code example #4
File: neuralel.py  Project: yubuyuabc/neural-el
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    FLAGS_check(FLAGS)

    config = Config(FLAGS.config, verbose=False)
    vocabloader = VocabLoader(config)

    if FLAGS.mode == 'inference':
        FLAGS.dropout_keep_prob = 1.0
        FLAGS.wordDropoutKeep = 1.0
        FLAGS.cohDropoutKeep = 1.0

        reader = InferenceReader(config=config,
                                 vocabloader=vocabloader,
                                 test_mens_file=config.test_file,
                                 num_cands=FLAGS.num_cand_entities,
                                 batch_size=FLAGS.batch_size,
                                 strict_context=FLAGS.strict_context,
                                 pretrain_wordembed=FLAGS.pretrain_wordembed,
                                 coherence=FLAGS.coherence)
        docta = reader.ccgdoc
        model_mode = 'inference'

    elif FLAGS.mode == 'test':
        FLAGS.dropout_keep_prob = 1.0
        FLAGS.wordDropoutKeep = 1.0
        FLAGS.cohDropoutKeep = 1.0

        reader = TestDataReader(config=config,
                                vocabloader=vocabloader,
                                test_mens_file=config.test_file,
                                num_cands=30,
                                batch_size=FLAGS.batch_size,
                                strict_context=FLAGS.strict_context,
                                pretrain_wordembed=FLAGS.pretrain_wordembed,
                                coherence=FLAGS.coherence)
        model_mode = 'test'

    else:
        print("MODE in FLAGS is incorrect : {}".format(FLAGS.mode))
        sys.exit()

    config_proto = tf.ConfigProto()
    config_proto.allow_soft_placement = True
    config_proto.gpu_options.allow_growth = True
    sess = tf.Session(config=config_proto)

    with sess.as_default():
        model = ELModel(
            sess=sess,
            reader=reader,
            dataset=FLAGS.dataset,
            max_steps=FLAGS.max_steps,
            pretrain_max_steps=FLAGS.pretraining_steps,
            word_embed_dim=FLAGS.word_embed_dim,
            context_encoded_dim=FLAGS.context_encoded_dim,
            context_encoder_num_layers=FLAGS.context_encoder_num_layers,
            context_encoder_lstmsize=FLAGS.context_encoder_lstmsize,
            coherence_numlayers=FLAGS.coherence_numlayers,
            jointff_numlayers=FLAGS.jointff_numlayers,
            learning_rate=FLAGS.learning_rate,
            dropout_keep_prob=FLAGS.dropout_keep_prob,
            reg_constant=FLAGS.reg_constant,
            checkpoint_dir=FLAGS.checkpoint_dir,
            optimizer=FLAGS.optimizer,
            mode=model_mode,
            strict=FLAGS.strict_context,
            pretrain_word_embed=FLAGS.pretrain_wordembed,
            typing=FLAGS.typing,
            el=FLAGS.el,
            coherence=FLAGS.coherence,
            textcontext=FLAGS.textcontext,
            useCNN=FLAGS.useCNN,
            WDLength=FLAGS.WDLength,
            Fsize=FLAGS.Fsize,
            entyping=FLAGS.entyping)

        if FLAGS.mode == 'inference':
            print("Doing inference")
            (predTypScNPmat_list, widIdxs_list, priorProbs_list,
             textProbs_list, jointProbs_list, evWTs_list,
             pred_TypeSetsList) = model.inference(ckptpath=FLAGS.model_path)

            numMentionsInference = len(widIdxs_list)
            numMentionsReader = 0
            for sent_idx in reader.sentidx2ners:
                numMentionsReader += len(reader.sentidx2ners[sent_idx])
            assert numMentionsInference == numMentionsReader

            mentionnum = 0
            entityTitleList = []
            for sent_idx in reader.sentidx2ners:
                nerDicts = reader.sentidx2ners[sent_idx]
                sentence = ' '.join(reader.sentences_tokenized[sent_idx])
                for s, ner in nerDicts:
                    [evWTs, evWIDS, evProbs] = evWTs_list[mentionnum]
                    predTypes = pred_TypeSetsList[mentionnum]
                    print(reader.bracketMentionInSentence(sentence, ner))
                    print("Prior: {} {}, Context: {} {}, Joint: {} {}".format(
                        evWTs[0], evProbs[0], evWTs[1], evProbs[1], evWTs[2],
                        evProbs[2]))

                    entityTitleList.append(evWTs[2])
                    print("Predicted Entity Types : {}".format(predTypes))
                    print("\n")
                    mentionnum += 1

            elview = copy.deepcopy(docta.view_dictionary['NER_CONLL'])
            elview.view_name = 'ENG_NEURAL_EL'
            for i, cons in enumerate(elview.cons_list):
                cons['label'] = entityTitleList[i]

            docta.view_dictionary['ENG_NEURAL_EL'] = elview

            print("elview.cons_list")
            print(elview.cons_list)
            print("\n")

            for v in docta.as_json['views']:
                print(v)
                print("\n")

        elif FLAGS.mode == 'test':
            print("Testing on Data ")
            (widIdxs_list, condProbs_list, contextProbs_list,
             condContextJointProbs_list, evWTs,
             sortedContextWTs) = model.dataset_test(ckptpath=FLAGS.model_path)

            print(len(widIdxs_list))
            print(len(condProbs_list))
            print(len(contextProbs_list))
            print(len(condContextJointProbs_list))
            print(len(reader.mentions))

            print("Writing Test Predictions: {}".format(FLAGS.test_out_fp))
            with open(FLAGS.test_out_fp, 'w') as f:
                for (wididxs, pps, mps,
                     jps) in zip(widIdxs_list, condProbs_list,
                                 contextProbs_list,
                                 condContextJointProbs_list):

                    mentionPred = ""

                    for (wididx, prp, mp, jp) in zip(wididxs, pps, mps, jps):
                        wit = reader.widIdx2WikiTitle(wididx)
                        mentionPred += wit + " " + str(prp) + " " + \
                            str(mp) + " " + str(jp)
                        mentionPred += "\t"

                    mentionPred = mentionPred.strip() + "\n"

                    f.write(mentionPred)

            print("Done writing. Can Exit.")

        else:
            print("WRONG MODE!")
            sys.exit(0)

    sys.exit()
Code example #5
File: assignment4.py  Project: Titanhood/JWALA
    print(match)
#question_3
x=[]
y=int(input())
for i in range(y):
    z=int(input())
    x.append(z)
    print(x)
#question_4
a1=[1,2,3,2,1]
b1=a1[::-1]
if(a1==b1):
    print('palindrome')
else:
    print('not palindrome')
#question_5
import copy as c
list1=[1,2,3,4]
list2=c.deepcopy(list1)
print(list1)
list1[3]=75
print(list1)
print(list2)
#list2 won't change because it's a deep copy of list1
#A shallow copy constructs a new compound object and then (to the extent possible) inserts references into it to the objects found in the original.
#A deep copy constructs a new compound object and then, recursively, inserts copies into it of the objects found in the original.
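
A quick contrast between a shallow and a deep copy (a small illustrative sketch, not part of the assignment code): copy.copy duplicates only the outer list, so nested objects remain shared, while copy.deepcopy recursively duplicates them.

import copy

outer = [[1, 2], [3, 4]]
shallow = copy.copy(outer)
deep = copy.deepcopy(outer)

outer[0].append(99)
print(shallow[0])  # [1, 2, 99] - the inner list is still shared with the original
print(deep[0])     # [1, 2]     - the inner list was copied recursively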




Code example #6
File: rest.py  Project: th3architect/hp-sdn-client
 def _download_args(self):
     args = copy.deepcopy(self.args)
     args["headers"]["content-type"] = 'application/zip'
     args["timeout"] = 60
     args["stream"] = True
     return args
Code example #7
def deep_copy_item(item):
    return copy.deepcopy(item)
Code example #8
File: params_dict.py  Project: vishalbelsare/tpu
 def _set(self, k, v):
     if isinstance(v, dict):
         self.__dict__[k] = ParamsDict(v)
     else:
         self.__dict__[k] = copy.deepcopy(v)
Code example #9
    def train(self, max_epochs, max_gradient_norm):
        """Train model"""
        model = self.model
        epochs_done = self.train_info['epoch']
        optimizer = self.optimizer
        scheduler = self.optim_scheduler

        for epoch in range(epochs_done, max_epochs):
            print("\n\nStart Epoch %d" % (epoch + 1))
            start_time = time.time()
            # with autograd.detect_anomaly():
            model.train()
            np.random.shuffle(self.train_examples)

            for idx, cur_example in enumerate(self.train_examples):
                def handle_example(train_example):
                    self.train_info['global_steps'] += 1
                    loss = model(train_example)
                    total_loss = loss['mention']

                    if torch.isnan(total_loss):
                        print("Loss is NaN")
                        sys.exit()
                    # Backprop
                    optimizer.zero_grad()
                    total_loss.backward()
                    # Perform gradient clipping and update parameters
                    torch.nn.utils.clip_grad_norm_(
                        model.parameters(), max_gradient_norm)

                    optimizer.step()

                from copy import deepcopy
                handle_example(deepcopy(cur_example))

                if (idx + 1) % 50 == 0:
                    print("Steps %d, Max memory %.3f" % (idx + 1, (torch.cuda.max_memory_allocated() / (1024 ** 3))))
                    torch.cuda.reset_peak_memory_stats()
                    # print("Current memory %.3f" % (torch.cuda.memory_allocated() / (1024 ** 3)))
                    # print(torch.cuda.memory_summary())

            # Update epochs done
            self.train_info['epoch'] = epoch + 1
            # Validation performance
            fscore, threshold = self.eval_model()

            scheduler.step(fscore)

            # Update model if validation performance improves
            if fscore > self.train_info['val_perf']:
                self.train_info['val_perf'] = fscore
                self.train_info['threshold'] = threshold
                logging.info('Saving best model')
                self.save_model(self.best_model_path)

            # Save model
            self.save_model(self.model_path)

            # Get elapsed time
            elapsed_time = time.time() - start_time
            logging.info("Epoch: %d, Time: %.2f, F-score: %.3f"
                         % (epoch + 1, elapsed_time, fscore))

            sys.stdout.flush()
Code example #10
File: test_file_upload.py  Project: yuekui/dj-stripe
from copy import deepcopy
from unittest.mock import ANY, call, patch

import pytest

from djstripe.models import Account, FileUpload

from . import FAKE_ACCOUNT, FAKE_FILEUPLOAD_ICON, FAKE_FILEUPLOAD_LOGO


@pytest.mark.django_db
@patch(
    target="stripe.FileUpload.retrieve",
    autospec=True,
    return_value=deepcopy(FAKE_FILEUPLOAD_ICON),
)
def test_file_upload_api_retrieve(mock_file_upload_retrieve):
    """Expect file_upload to use the ID of the account referring
    to it to retrieve itself.
    """
    # Create files
    icon_file = FileUpload._get_or_create_from_stripe_object(data=FAKE_FILEUPLOAD_ICON)[
        0
    ]
    logo_file = FileUpload._get_or_create_from_stripe_object(data=FAKE_FILEUPLOAD_LOGO)[
        0
    ]
    # Create account to associate the files to it
    account = Account._get_or_create_from_stripe_object(data=FAKE_ACCOUNT)[0]

    # Call the API retrieve methods.
Code example #11
File: q85.py  Project: djs1193/codeabbey
field[3][7] = "X"
field[4][3] = "X"
field[4][4] = "X"
field[4][5] = "X"
field[4][7] = "X"
field[5][3] = "X"
field[5][6] = "X"
field[5][7] = "X"
field[6][3] = "X"
field[6][7] = "X"
field[6][8] = "X"
field[6][9] = "X"
field[7][8] = "X"
field[7][9] = "X"

newfield = deepcopy(field)

print(field)
print(" ")

def neighbor(r,c,field):
    n = 0
    try :
        if (field[r-1][c-1] == "X"):
            n+=1
        if (field[r-1][c] == "X"):
            n+=1
        if (field[r-1][c+1] == "X"):
            n+=1
        if (field[r][c-1] == "X"):
            n+=1
Code example #12
File: checkupdates.py  Project: v1d0/fdroidserver
def checkupdates_app(app):

    # If a change is made, commitmsg should be set to a description of it.
    # Only if this is set will changes be written back to the metadata.
    commitmsg = None

    tag = None
    msg = None
    vercode = None
    noverok = False
    mode = app.UpdateCheckMode
    if mode.startswith('Tags'):
        pattern = mode[5:] if len(mode) > 4 else None
        (version, vercode, tag) = check_tags(app, pattern)
        if version == 'Unknown':
            version = tag
        msg = vercode
    elif mode == 'RepoManifest':
        (version, vercode) = check_repomanifest(app)
        msg = vercode
    elif mode.startswith('RepoManifest/'):
        tag = mode[13:]
        (version, vercode) = check_repomanifest(app, tag)
        msg = vercode
    elif mode == 'RepoTrunk':
        (version, vercode) = check_repotrunk(app)
        msg = vercode
    elif mode == 'HTTP':
        (version, vercode) = check_http(app)
        msg = vercode
    elif mode in ('None', 'Static'):
        version = None
        msg = 'Checking disabled'
        noverok = True
    else:
        version = None
        msg = 'Invalid update check method'

    if version and vercode and app.VercodeOperation:
        if not common.VERCODE_OPERATION_RE.match(app.VercodeOperation):
            raise MetaDataException(_('Invalid VercodeOperation: {field}')
                                    .format(field=app.VercodeOperation))
        oldvercode = str(int(vercode))
        op = app.VercodeOperation.replace("%c", oldvercode)
        vercode = str(common.calculate_math_string(op))
        logging.debug("Applied vercode operation: %s -> %s" % (oldvercode, vercode))

    if version and any(version.startswith(s) for s in [
            '${',  # Gradle variable names
            '@string/',  # Strings we could not resolve
            ]):
        version = "Unknown"

    updating = False
    if version is None:
        logmsg = "...{0} : {1}".format(app.id, msg)
        if noverok:
            logging.info(logmsg)
        else:
            logging.warning(logmsg)
    elif vercode == app.CurrentVersionCode:
        logging.info("...up to date")
    else:
        logging.debug("...updating - old vercode={0}, new vercode={1}".format(
            app.CurrentVersionCode, vercode))
        app.CurrentVersion = version
        app.CurrentVersionCode = str(int(vercode))
        updating = True

    commitmsg = fetch_autoname(app, tag)

    if updating:
        name = common.getappname(app)
        ver = common.getcvname(app)
        logging.info('...updating to version %s' % ver)
        commitmsg = 'Update CV of %s to %s' % (name, ver)

    if options.auto:
        mode = app.AutoUpdateMode
        if not app.CurrentVersionCode:
            logging.warning("Can't auto-update app with no current version code: " + app.id)
        elif mode in ('None', 'Static'):
            pass
        elif mode.startswith('Version '):
            pattern = mode[8:]
            if pattern.startswith('+'):
                try:
                    suffix, pattern = pattern.split(' ', 1)
                except ValueError:
                    raise MetaDataException("Invalid AUM: " + mode)
            else:
                suffix = ''
            gotcur = False
            latest = None
            for build in app.builds:
                if int(build.versionCode) >= int(app.CurrentVersionCode):
                    gotcur = True
                if not latest or int(build.versionCode) > int(latest.versionCode):
                    latest = build

            if int(latest.versionCode) > int(app.CurrentVersionCode):
                logging.info("Refusing to auto update, since the latest build is newer")

            if not gotcur:
                newbuild = copy.deepcopy(latest)
                newbuild.disable = False
                newbuild.versionCode = app.CurrentVersionCode
                newbuild.versionName = app.CurrentVersion + suffix
                logging.info("...auto-generating build for " + newbuild.versionName)
                commit = pattern.replace('%v', newbuild.versionName)
                commit = commit.replace('%c', newbuild.versionCode)
                newbuild.commit = commit
                app.builds.append(newbuild)
                name = common.getappname(app)
                ver = common.getcvname(app)
                commitmsg = "Update %s to %s" % (name, ver)
        else:
            logging.warning('Invalid auto update mode "' + mode + '" on ' + app.id)

    if commitmsg:
        metadata.write_metadata(app.metadatapath, app)
        if options.commit:
            logging.info("Commiting update for " + app.metadatapath)
            gitcmd = ["git", "commit", "-m", commitmsg]
            if 'auto_author' in config:
                gitcmd.extend(['--author', config['auto_author']])
            gitcmd.extend(["--", app.metadatapath])
            if subprocess.call(gitcmd) != 0:
                raise FDroidException("Git commit failed")
Code example #13
 def __deepcopy__(self, memo):
     return ndict(copy.deepcopy(self.__internal))
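
The hook above ignores the memo argument it receives. A minimal sketch of the conventional pattern (hypothetical Box class, not from the project above) registers the new object in memo before recursing, so shared and cyclic references resolve to a single copy:

import copy

class Box:
    def __init__(self, payload):
        self.payload = payload

    def __deepcopy__(self, memo):
        # Record the copy before recursing so cycles terminate
        # and shared sub-objects are copied only once.
        new = Box.__new__(Box)
        memo[id(self)] = new
        new.payload = copy.deepcopy(self.payload, memo)
        return new

With this in place, copy.deepcopy on a Box whose payload refers back to the Box itself terminates instead of recursing forever.
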
Code example #14
File: cli.py  Project: realflash7/raiden
    def _run_smoketest():
        print_step('Starting Raiden')

        config = deepcopy(App.DEFAULT_CONFIG)
        if args.get('extra_config', dict()):
            merge_dict(config, args['extra_config'])
            del args['extra_config']
        args['config'] = config

        raiden_stdout = StringIO()
        with contextlib.redirect_stdout(raiden_stdout):
            app = run_app(**args)

            try:
                raiden_api = RaidenAPI(app.raiden)
                rest_api = RestAPI(raiden_api)
                (api_host, api_port) = split_endpoint(args['api_address'])
                api_server = APIServer(rest_api, config={'host': api_host, 'port': api_port})
                api_server.start()

                raiden_api.channel_open(
                    registry_address=contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY],
                    token_address=to_canonical_address(token.contract.address),
                    partner_address=to_canonical_address(TEST_PARTNER_ADDRESS),
                )
                raiden_api.set_total_channel_deposit(
                    contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY],
                    to_canonical_address(token.contract.address),
                    to_canonical_address(TEST_PARTNER_ADDRESS),
                    TEST_DEPOSIT_AMOUNT,
                )
                token_addresses = [to_checksum_address(token.contract.address)]

                success = False
                print_step('Running smoketest')
                error = run_smoketests(
                    app.raiden,
                    args['transport'],
                    token_addresses,
                    contract_addresses[CONTRACT_ENDPOINT_REGISTRY],
                    debug=debug,
                )
                if error is not None:
                    append_report('Smoketest assertion error', error)
                else:
                    success = True
            finally:
                app.stop()
                app.raiden.get()
                node = ethereum[0]
                node.send_signal(2)
                err, out = node.communicate()

                append_report('Ethereum stdout', out)
                append_report('Ethereum stderr', err)
        append_report('Raiden Node stdout', raiden_stdout.getvalue())
        if success:
            print_step('Smoketest successful')
        else:
            print_step('Smoketest had errors', error=True)
        return success
Code example #15
File: status.py  Project: Haxine/mars-1
 def get_progress(self):
     return copy.deepcopy(self._progress)
Code example #16
def dc(o):
    """
    Some of the testing methods modify the datastructure you pass into them.
    We want to deepcopy each structure so one test doesn't break another.
    """
    return copy.deepcopy(o)
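
A short usage sketch of the helper (hypothetical fixture and test names, assuming copy is imported alongside dc): each test works on its own copy, so in-place mutations cannot leak into the next test.

BASE_CONFIG = {"thresholds": [0.1, 0.5], "labels": {"pos": 1, "neg": 0}}

def test_mutating_thresholds():
    cfg = dc(BASE_CONFIG)
    cfg["thresholds"].append(0.9)  # local to this test only
    assert len(cfg["thresholds"]) == 3

def test_original_untouched():
    # Holds only because the previous test mutated a deep copy.
    assert BASE_CONFIG["thresholds"] == [0.1, 0.5]
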
Code example #17
def anunt(link, printare = False):
    '''
    --------------THE CONNECTION------------
    here we establish the connection to the site
    '''
    
    try:
        html = urlopen(link)
    except HTTPError as e: #in case the listing cannot be opened
        print(link)
        return None
    except requests.ConnectionError as e: #we got a connection error, so retry once more after 120 seconds
        time.sleep(120)
        print(link)
        print('probleme la conexiune')
        return anunt(link)
    anunt = bs(html.read(), 'html.parser')  #open the link with Beautiful Soup
    
    '''
    --------------TITLE AND PRICE------------
    extract the title and the price
    if we cannot extract them, the function returns None and we print the link,
    because we could not obtain some essential information
    '''
    
    try:
        titlu_anunt = anunt.find('div', {'id':'content'}).h1.text
    except AttributeError:
        print(link)
        print('nu am putut extrage titlul')
        return None
    
    
    try:
        pret = anunt.find('div', {'class':'price'}).text  #the title was in an h1
        pret = re.sub(r'\D', '', pret)
        #titlu_anunt = titlu_anunt
    except AttributeError: #if it cannot be found in the HTML, stop trying
        print('nu am putut extrage pretul')
        print(link)
        return None
    
    
    '''
    --------------THE DESCRIPTION (APARTMENT DETAILS)------------
    the description is the free text that describes the apartment, like a story
    '''
    
    
    try:
        descriere_text = anunt.find('p', {'class':'description'}).get_text() #the price was in a div with that id
        descriere_text = descriere_text.replace('\n', '')
        descriere_text = descriere_text.replace('\r', '')
        descriere_text = descriere_text.replace('\t', '')
    except AttributeError:
        print(link)
        print('probleme cu descrierea')
        return None
    #print(descriere_text)
    
    
    '''
    --------------APARTMENT DATA------------
    the tabular data about the apartment sits in 2 columns, one with the label of the information
    and one with the information itself
    we extract the values from each column into a list and then join the 2 columns

    the characteristics of an apartment are in 2 columns
    one column holds the characteristic (the number of rooms)
    the other column holds the value (4 <rooms>)
    '''

    
    try:
        coloana = anunt.find_all('div', {'class':"property_label"})
        descriere = anunt.find_all('div', {'class':'property_prop'})
        lista_coloana=[]
        lista_descriere=[]
    except AttributeError:
        print(link)
        print('probleme cu caracteristicile coloana-descriere')
        return None
    

    
    '''
    --------------JOINING THE 2 COLUMNS------------
    '''
    
    for i in coloana:
        cuvant = i.text
        cuvant = cuvant.replace(':', '')
        #strip the ':' from the string
        cuvant = cuvant.replace('\xa0', 'Descriere suplimentara cladire')
        #there was an empty slot in the columns, marked in the HTML text with "\xa0",
        #which held an extra description of the building, such as thermally insulated
        lista_coloana.append(cuvant)

    for i in descriere:
        cuvant = i.text
        cuvant = cuvant.replace(' m2', '')
        #the surface value also contains " m2"
        #I chose to strip "m2" so the data stays purely numeric
        lista_descriere.append(cuvant)
    
    dictionar = dict(zip(lista_coloana, lista_descriere))
    #turn the 2 lists into a dictionary
    #the key holds the column with the label of the information (Suprafata)
    #the value holds the information itself (50 <sqm>)
    
    
    
    dictionar['titlu']=titlu_anunt
    dictionar['descriere']=descriere_text
    dictionar['pret']=pret
    dictionar['link']=link
    
    
    '''
    --------------UTILITIES------------
    we extract what is listed under utilities
    at first each apartment gets its own utilities dictionary,
    then we merge the utilities dictionary into the final one
    '''
    
    
    dict_utilitati = {}
    try:
        cheie_utilitati =  anunt.find('fieldset', {'id':'utilitati'}).b.text
        cheie_utilitati = cheie_utilitati.replace(' ', '')
        cheie_utilitati = cheie_utilitati.replace(':', '')
        #the key, i.e. the column, shown in bold
        detalii_utilitati = anunt.find('fieldset', {'id':'utilitati'}).find_all('img')
        lista_utilitati = []
        for i in detalii_utilitati:
            j = i['alt']
            #the value is stored in the "alt" attribute of the img tag
            lista_utilitati.append(j)
        dict_utilitati[cheie_utilitati]=','.join(lista_utilitati)
    except AttributeError: 
        dict_utilitati['Utilitati']=None
        print('probleme cu utilitatile')
    
    
    
    
    '''
    --------------FINISHES------------
    We extract the finishes and put them into the dictionary
    '''
    #extract the text from the HTML section that holds the finishes
    try:
        finisaje = anunt.find('fieldset', {'id':'finisari'}).get_text()
    except AttributeError:
        print(link)
        print('probleme cu finisajele')
        return None


    
    '''
    The function below inserts a comma whenever the next character is uppercase.
    We want a string back; we then add a comma and a space after PVC
    and finally split on every comma, so that each element
    ends up in the "finis" list.
    '''
    def split_uppercase(s):
        r = []
        l = False
        for c in s:
            # l being: last character was not uppercase
            if l and c.isupper():
                r.append(',')
            l = not c.isupper()
            r.append(c)
        return ''.join(r)

    finisaje_mod = split_uppercase(finisaje)
    finisaje_mod = finisaje_mod.replace('PVC', 'PVC, ')
    #print(finisaje_mod)
    
    finis = finisaje_mod.split(',')
    
    #print('\n\n\n\n\n\n')
    #print(finis)
    #print('\n\n\n\n\n\n')
    
    
    '''
    --------FROM THE LIST OF FINISHES WE SPLIT OUT EACH WORD AND PUT IT INTO A LIST--------
    '''
    
    
    
    lista_key=[]

    
   # dict_finisaje = {}
    
   
    '''
    because we had problems with the spaces after the label name
    (some had a space after the word, others did not),
    we decided to assign a new key
    '''
    
    lista_key_noi = ['finisaje', 'pardosea', 'pereti', 'geamuri_usi', 'bucatarie', 'baie', 'dotari']
    lista_expresii = [r'.*Finisaje.*' , r'.*Pardoseala.*', r'.*Pereti.*', r'.*Geamuri si usi.*', r'.*Finisaj bucatarie.*', r'.*Finisaj baie.*', r'.*Dotari.*']

    for idx, i in enumerate(finis):
        if i == '\n':
            del finis[idx]
            
    for nou, expresie in zip(lista_key_noi, lista_expresii):
        for idx, i in enumerate(finis):
            match_regex = re.match( expresie, i)
            if match_regex:
                i = i.replace(i, nou)
                #print('da')
                #print(i)
                finis[idx]=i
    
    #for index, value in finis:
    dict_special={}
    
    '''
    lista_descrieri will hold the key for each new dictionary entry from the finishes,
    i.e. it will contain pereti, dotari, bucatarie etc.

    the labels are lowercase, so we search the finis list
    for every word that starts with a lowercase letter
    '''
    
    lista_descrieri = []    
    for i in finis:
        merge = re.match(r'[a-z].*', i)
        if merge:
            lista_descrieri.append(i)    
    
    
    '''
    the "finis" list holds all the words from the finishes section
    the keys, i.e. the columns, are in lista_descrieri
    each key owns the values up to the next key
    '''
    for index, value in enumerate(lista_descrieri):
        value1 = value
        index2 = lista_descrieri.index(value) + 1
        lista_finisaje = []
        try:
            value2 = lista_descrieri[index2]
            value1_index = finis.index(value1)
            value2_index = finis.index(value2)
            
            for i in finis[value1_index+1:value2_index]:
                lista_finisaje.append(i)
            value_finisaje = ','.join(lista_finisaje)
            dict_special[value1]=value_finisaje
        except IndexError:
            value1_index = finis.index(value1)
            for i in finis[value1_index+1:]:
                lista_finisaje.append(i)
            value_finisaje = ','.join(lista_finisaje)
            dict_special[value1]=value_finisaje
    
    
    dictionar_final={}
    def merge_two_dicts(x, y):
        z = x.copy()   
        z.update(y)    
        return z
    #merge the initial dictionary with the utilities one and the finishes one
    dictionar_final = merge_two_dicts(dictionar, dict_utilitati)
    dictionar_final = merge_two_dicts(dictionar_final, dict_special)
    
    #rename the keys so we can handle the dictionary more easily
    lista_veche = ['pret', 'Nr. bai', 'finisaje', 'Nr. terase', 'geamuri_usi', 'Descriere suplimentara cladire', 'Tip proprietate', 'Suprafata utila', 'pardosea', 'Tip Constructie', 'descriere', 'Nr. balcoane', 'titlu', 'ID', 'pereti', 'Confort', 'Compartimentare', 'Zona', 'dotari', 'link', 'Tranzactie', 'Nr. garaje', 'Suprafata construita', 'Nr. parcari', 'Nr. bucatarii', 'Nr. camere', 'baie', 'Etaj', 'Cartier', 'An Constructie', 'bucatarie', 'Utilitati']    
    lista_noua = ['pret', 'numar_bai', 'finisaje', 'numar_terase', 'geamuri_usi', 'descriere_suplimentara', 'tip_proprietate', 'suprafata_utila', 'pardosea', 'tip_constructie', 'descriere', 'numar_balcoane', 'titlu', 'ID', 'pereti', 'confort', 'compartimentare', 'zona', 'dotari', 'link', 'tranzactie', 'numar_garaje', 'suprafata_construita', 'numar_parcari', 'numar_bucatarii', 'numar_camere', 'baie', 'etaj', 'cartier', 'an_constructie', 'bucatarie', 'utilitati']    
    
    dictionar_scris= {}
    for v, n in zip(lista_veche, lista_noua):
        try:
            dictionar_scris[n] = copy.deepcopy(dictionar_final[v])
        except copy.error:
            dictionar_scris[n] = None
        except KeyError:
            dictionar_scris[n]= None
    
    '''
    to extract the geolocation we need the full area
    this will be a string containing the city, the neighbourhood and, if available, the area
    '''
    if dictionar_scris['zona']!=None:
        dictionar_scris['zona_completa'] = "Cluj-Napoca, " + dictionar_scris['cartier'] + ", " + dictionar_scris['zona']
    else:
        dictionar_scris['zona_completa'] = "Cluj-Napoca, " + dictionar_scris['cartier']
        #return zona_completa
    
    #dict_final = zonacompleta(dictionar_scris)

    if printare == True:
        print(dictionar_scris)    
    return dictionar_scris
Code example #18
File: mac_brew.py  Project: viktordaniel/salt
def list_pkgs(versions_as_list=False, **kwargs):
    '''
    List the packages currently installed in a dict::

        {'<package_name>': '<version>'}

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_pkgs
    '''
    versions_as_list = salt.utils.data.is_true(versions_as_list)
    # not yet implemented or not applicable
    if any([
            salt.utils.data.is_true(kwargs.get(x))
            for x in ('removed', 'purge_desired')
    ]):
        return {}

    if 'pkg.list_pkgs' in __context__:
        if versions_as_list:
            return __context__['pkg.list_pkgs']
        else:
            ret = copy.deepcopy(__context__['pkg.list_pkgs'])
            __salt__['pkg_resource.stringify'](ret)
            return ret

    ret = {}
    cmd = 'brew info --json=v1 --installed'
    package_info = salt.utils.json.loads(_call_brew(cmd)['stdout'])

    for package in package_info:
        # Brew allows multiple versions of the same package to be installed.
        # Salt allows for this, so it must be accounted for.
        versions = [v['version'] for v in package['installed']]
        # Brew allows for aliasing of packages, all of which will be
        # installable from a Salt call, so all names must be accounted for.
        names = package['aliases'] + [package['name'], package['full_name']]
        # Create a list of tuples containing all possible combinations of
        # names and versions, because all are valid.
        combinations = [(n, v) for n in names for v in versions]

        for name, version in combinations:
            __salt__['pkg_resource.add_pkg'](ret, name, version)

    # Grab packages from brew cask, if available.
    # Brew Cask doesn't provide a JSON interface, must be parsed the old way.
    try:
        cask_cmd = 'brew cask list --versions'
        out = _call_brew(cask_cmd)['stdout']

        for line in out.splitlines():
            try:
                name_and_versions = line.split(' ')
                name = '/'.join(('caskroom/cask', name_and_versions[0]))
                installed_versions = name_and_versions[1:]
                key_func = functools.cmp_to_key(
                    salt.utils.versions.version_cmp)
                newest_version = sorted(installed_versions, key=key_func).pop()
            except ValueError:
                continue
            __salt__['pkg_resource.add_pkg'](ret, name, newest_version)
    except CommandExecutionError:
        pass

    __salt__['pkg_resource.sort_pkglist'](ret)
    __context__['pkg.list_pkgs'] = copy.deepcopy(ret)
    if not versions_as_list:
        __salt__['pkg_resource.stringify'](ret)
    return ret
Code example #19
File: rest.py  Project: th3architect/hp-sdn-client
 def _upload_args(self, filename):
     args = copy.deepcopy(self.args)
     args["headers"]["content-type"] = 'application/zip'
     args["headers"]["Filename"] = filename
     args["timeout"] = 60
     return args
Code example #20
 def get_password_masked_url(cls, url):
     url_copy = deepcopy(url)
     if url_copy.password is not None and url_copy.password != PASSWORD_MASK:
         url_copy.password = PASSWORD_MASK
     return url_copy
Code example #21
def tdidt_random_forest(current_instances, att_indexes, att_domains, F):

    # print(att_indexes)
    att_indexes2 = copy.deepcopy(att_indexes)
    if (len(att_indexes) > F):
        compute_random_subset(att_indexes, F)
    split_attribute = select_attribute(current_instances, att_indexes2,
                                       att_domains)
    # print("TEST", split_attribute, "T", att_indexes)
    class_label = "att" + str(split_attribute)
    att_indexes2 = copy.deepcopy(att_indexes)
    att_indexes2.remove(split_attribute)

    partitions = {}
    attributes = att_domains[class_label]
    for a in attributes:
        partitions[a] = []
    for instance in current_instances:
        partitions[instance[split_attribute]].append(instance)

    tree = ["Attribute", "att" + str(split_attribute)]

    for attribute_value, partition in partitions.items():
        values_subtree = ["Value", attribute_value]

        if len(partition) > 0 and all_same_class(partition):
            leaf = [
                "Leaf", partition[0][-1],
                len(partition),
                len(current_instances)
            ]
            values_subtree.append(leaf)
            tree.append(values_subtree)
        #    CASE 2: no more attributes to select (clash) => handle clash w/majority vote leaf node
        elif len(partition) > 0 and len(att_indexes2) == 0:
            partition_stats = compute_partition_stats(partition, -1)
            partition_stats.sort(key=lambda x: x[1])
            leaf = [
                "Leaf", partition_stats[-1][0],
                len(partition),
                len(current_instances)
            ]
            values_subtree.append(leaf)
            tree.append(values_subtree)

        #    CASE 3: no more instances to partition (empty partition) => backtrack and replace attribute node with majority vote leaf node
        elif len(partition) == 0:
            partition_stats = compute_partition_stats(current_instances, -1)
            partition_stats.sort(key=lambda x: x[1])
            leaf = [
                "Leaf", partition_stats[-1][0],
                len(partition),
                len(current_instances)
            ]
            return leaf
        else:  # all base cases are false, recurse!!
            subtree = tdidt_random_forest(partition, att_indexes2, att_domains,
                                          F)
            values_subtree.append(subtree)
            tree.append(values_subtree)
    return tree
Code example #22
File: MCTS.py  Project: bartmate/Amoeba-Zero
 def __init__(self, game, evaluator, root_node = None):
     self.root_node = root_node
     if self.root_node is None:
         self.root_node = Node()
     self.game = copy.deepcopy(game)
     self.evaluator = evaluator   
Code example #23
    def main(self, argv):

        def _get_subparser(api_version):
            try:
                return self.get_subcommand_parser(api_version, argv)
            except ImportError as e:
                if not str(e):
                    # Add a generic import error message if the raised
                    # ImportError has none.
                    raise ImportError('Unable to import module. Re-run '
                                      'with --debug for more info.')
                raise

        # Parse args once to find version

        # NOTE(flepied) Under Python3, parsed arguments are removed
        # from the list so make a copy for the first parsing
        base_argv = copy.deepcopy(argv)
        parser = self.get_base_parser(argv)
        (options, args) = parser.parse_known_args(base_argv)

        try:
            # NOTE(flaper87): Try to get the version from the
            # image-url first. If no version was specified, fallback
            # to the api-image-version arg. If both of these fail then
            # fallback to the minimum supported one and let keystone
            # do the magic.
            endpoint = self._get_image_url(options)
            endpoint, url_version = utils.strip_version(endpoint)
        except ValueError:
            # NOTE(flaper87): ValueError is raised if no endpoint is provided
            url_version = None

        # build available subcommands based on version
        try:
            api_version = int(options.os_image_api_version or url_version or 2)
            if api_version not in SUPPORTED_VERSIONS:
                raise ValueError
        except ValueError:
            msg = ("Invalid API version parameter. "
                   "Supported values are %s" % SUPPORTED_VERSIONS)
            utils.exit(msg=msg)

        # Handle top-level --help/-h before attempting to parse
        # a command off the command line
        if options.help or not argv:
            parser = _get_subparser(api_version)
            self.do_help(options, parser=parser)
            return 0

        # NOTE(sigmavirus24): Above, args is defined as the left over
        # arguments from parser.parse_known_args(). This allows us to
        # skip any parameters to command-line flags that may have been passed
        # to glanceclient, e.g., --os-auth-token.
        self._fixup_subcommand(args, argv)

        # short-circuit and deal with help command right away.
        sub_parser = _get_subparser(api_version)
        args = sub_parser.parse_args(argv)

        if args.func == self.do_help:
            self.do_help(args, parser=sub_parser)
            return 0
        elif args.func == self.do_bash_completion:
            self.do_bash_completion(args)
            return 0

        if not options.os_image_api_version and api_version == 2:
            switch_version = True
            client = self._get_versioned_client('2', args)

            resp, body = client.http_client.get('/versions')

            for version in body['versions']:
                if version['id'].startswith('v2'):
                    # NOTE(flaper87): We know v2 is enabled in the server,
                    # which means we should be able to get the schemas and
                    # move on.
                    switch_version = self._cache_schemas(options, client)
                    break

            if switch_version:
                print('WARNING: The client is falling back to v1 because'
                      ' access to v2 failed. This behavior will'
                      ' be removed in future versions.', file=sys.stderr)
                api_version = 1

        sub_parser = _get_subparser(api_version)

        # Parse args again and call whatever callback was selected
        args = sub_parser.parse_args(argv)

        # NOTE(flaper87): Make sure we re-use the password input if we
        # have one. This may happen if the schemas were downloaded in
        # this same command. Password will be asked to download the
        # schemas and then for the operations below.
        if not args.os_password and options.os_password:
            args.os_password = options.os_password

        if args.debug:
            # Set up the root logger to debug so that the submodules can
            # print debug messages
            logging.basicConfig(level=logging.DEBUG)
            # for iso8601 < 0.1.11
            logging.getLogger('iso8601').setLevel(logging.WARNING)
        LOG = logging.getLogger('glanceclient')
        LOG.addHandler(logging.StreamHandler())
        LOG.setLevel(logging.DEBUG if args.debug else logging.INFO)

        profile = osprofiler_profiler and options.profile
        if profile:
            osprofiler_profiler.init(options.profile)

        client = self._get_versioned_client(api_version, args)

        try:
            args.func(client, args)
        except exc.Unauthorized:
            raise exc.CommandError("Invalid OpenStack Identity credentials.")
        finally:
            if profile:
                trace_id = osprofiler_profiler.get().get_base_id()
                print("Profiling trace ID: %s" % trace_id)
                print("To display trace use next command:\n"
                      "osprofiler trace show --html %s " % trace_id)
Code example #24
File: MCTS.py  Project: bartmate/Amoeba-Zero
 def reinit(self, game, node):
     self.root_node = node
     self.game = copy.deepcopy(game)
Code example #25
import numpy as np
from scipy.optimize import nnls


def convert_model(from_popn, from_model, from_vars, to_popn, to_model, to_vars):
    """ Convert from one model to another model of a different type
        Generally this will involve projecting impulse responses, etc.
        It's hairy business.
    """

    # Idea: Get the state of the GLMs, e.g. the impulse responses, etc.
    #       Project those states onto the parameters of the to-model
    N = from_popn.N
    from_state = from_popn.eval_state(from_vars)
    to_state = to_popn.eval_state(to_vars)

    conv_vars = None
    if from_model['impulse']['type'].lower() == 'basis':
        if to_model['impulse']['type'].lower() == 'normalized' or \
           to_model['impulse']['type'].lower() == 'dirichlet':
            import copy
            conv_vars = copy.deepcopy(to_vars)

            # To convert from basis -> normalized, project the impulse
            # responses onto the normalized basis, divide by the area
            # under the curve to get the weight.
            W = np.zeros((N,N))
            for n2 in np.arange(N):
                B = to_state['glms'][n2]['imp']['basis'].shape[1]
                w_ir_n2 = np.zeros((N,B))
                for n1 in np.arange(N):
                    # Solve a nonnegative least squares problem
                    (w_ir_n1n2p, residp) = nnls(to_state['glms'][n2]['imp']['basis'],
                                                from_state['glms'][n2]['imp']['impulse'][n1,:])
                    (w_ir_n1n2n, residn) = nnls(to_state['glms'][n2]['imp']['basis'],
                                                -1.0*from_state['glms'][n2]['imp']['impulse'][n1,:])

                    # Take the better of the two solutions
                    if residp < residn:
                        Wsgn = 1.0
                        w_ir_n1n2 = w_ir_n1n2p
                    else:
                        Wsgn = -1.0
                        w_ir_n1n2 = w_ir_n1n2n

                    # Normalized weights must be > 0 and sum to 1
                    w_ir_n1n2 = np.clip(w_ir_n1n2, 0.001, np.inf)
                    # Normalize the impulse response to get a weight
                    W[n1,n2] = Wsgn*np.sum(w_ir_n1n2)

                    # Set impulse response to normalized impulse response
                    w_ir_n2[n1,:] = w_ir_n1n2 / np.sum(w_ir_n1n2)

                # Update to_vars
                if to_model['impulse']['type'].lower() == 'normalized':
                    conv_vars['glms'][n2]['imp']['w_lng'] = np.log(w_ir_n2.flatten())
                if to_model['impulse']['type'].lower() == 'dirichlet':
                    for n1 in range(N):
                        # Scale up the weights such that the average is preserved
                        alpha = to_popn.glm.imp_model.alpha
                        B = to_popn.glm.imp_model.B
                        conv_vars['glms'][n2]['imp']['g_%d' % n1] = alpha * B * w_ir_n2[n1,:]

            # Update to_vars
            conv_vars['net']['weights']['W'] = W.flatten()

            # Threshold the adjacency matrix to start with the right level of sparsity
            if 'rho' in to_model['network']['graph'].keys():
                W_sorted = np.sort(np.abs(W.ravel()))
                thresh_idx = int(np.floor((1.0 - 2.0 * to_model['network']['graph']['rho']) * (N ** 2 - N) - N))
                thresh = W_sorted[thresh_idx]
                conv_vars['net']['graph']['A'] = (np.abs(W) >= thresh).astype(np.int8)
            else:
                conv_vars['net']['graph']['A'] = np.ones((N,N), dtype=np.int8)

    # Copy over the bias
    for n in np.arange(N):
        conv_vars['glms'][n]['bias']['bias'] = from_vars['glms'][n]['bias']['bias']

    # Copy over the background params
    if 'sharedtuningcurves' in to_model['latent'] and \
        from_model['bkgd']['type'] == 'spatiotemporal':
        convert_stimulus_filters_to_sharedtc(from_popn, from_model, from_vars,
                                             to_popn, to_model, conv_vars)

    return conv_vars
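
The heart of the basis -> normalized conversion above is a nonnegative least-squares projection: fit the source impulse response with the target basis (trying both signs and keeping the better fit), take the summed coefficients as the connection weight, and renormalize the coefficients to describe the shape. A standalone sketch of that step with scipy.optimize.nnls; the basis and impulse response below are invented for illustration:

import numpy as np
from scipy.optimize import nnls

# Toy basis: three Gaussian bumps on a 50-point time axis (illustrative only).
t = np.linspace(0, 1, 50)
basis = np.column_stack([np.exp(-0.5 * ((t - c) / 0.1) ** 2) for c in (0.2, 0.5, 0.8)])

# Toy "impulse response" to project onto the basis.
impulse = 0.7 * basis[:, 0] + 0.2 * basis[:, 2]

# Fit with nonnegative coefficients; also try the sign-flipped response and
# keep whichever fit has the smaller residual, as the snippet above does.
coeffs_pos, resid_pos = nnls(basis, impulse)
coeffs_neg, resid_neg = nnls(basis, -impulse)
sign, coeffs = (1.0, coeffs_pos) if resid_pos < resid_neg else (-1.0, coeffs_neg)

# Keep coefficients strictly positive, then split into weight and shape.
coeffs = np.clip(coeffs, 1e-3, np.inf)
weight = sign * coeffs.sum()     # scalar connection weight, like W[n1, n2]
shape = coeffs / coeffs.sum()    # normalized impulse-response coefficients

print(weight, shape)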
Code example #26
File: scc1.py  Project: rejithry/class
            #print 'in loop 2'
            t[str(reversed_graph[i][1])][3].append(str(reversed_graph[j][1]))
    for i in graph:
        #print 'loop3'
        if i not in t:
            t[i]=[0,0,0,[]]
    return t

s = '0'

ft = 0
ift = 0

# Keep independent deep copies of the graph and of its reversal so that
# the later passes can mutate one without touching the other.
g1 = copy.deepcopy(graph)
print('Start')

reversed_graph = rev(graph)
print(reversed_graph)
g1 = copy.deepcopy(reversed_graph)
graph = copy.deepcopy(reversed_graph)

# Node labels as strings '1' .. '14'
l = [str(i) for i in range(1, 15)]

print(l)
for j in l:
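
The snippet relies on a rev() helper to reverse the graph before deep-copying it, but rev() itself is not shown, and the positional indexing earlier in the snippet suggests the project stores edges differently. As a hypothetical sketch only, graph reversal for a plain adjacency-list dict could look like this:

import copy


def rev(graph):
    """Return a new adjacency-list dict with every edge reversed."""
    reversed_graph = {node: [] for node in graph}
    for node, neighbours in graph.items():
        for neighbour in neighbours:
            # Targets that only appear as neighbours still get a key.
            reversed_graph.setdefault(neighbour, []).append(node)
    return reversed_graph


graph = {'1': ['2'], '2': ['3'], '3': ['1', '4'], '4': []}
reversed_graph = rev(graph)
print(reversed_graph)   # {'1': ['3'], '2': ['1'], '3': ['2'], '4': ['3']}

# Deep copies keep the original and reversed structures independent,
# mirroring the g1/graph bookkeeping in the snippet above.
g1 = copy.deepcopy(reversed_graph)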
Code example #27
File: aliases.py  Project: sv3048/PlotsConfigurations
    'samples': mc
}

aliases['btagSF'] = {
    'expr':
    '(bVeto || (topcr && zeroJet))*bVetoSF + (topcr && !zeroJet)*bReqSF',
    'samples': mc
}

for shift in [
        'jes', 'lf', 'hf', 'lfstats1', 'lfstats2', 'hfstats1', 'hfstats2',
        'cferr1', 'cferr2'
]:

    for targ in ['bVeto', 'bReq']:
        alias = aliases['%sSF%sup' % (targ, shift)] = copy.deepcopy(
            aliases['%sSF' % targ])
        alias['expr'] = alias['expr'].replace('btagSF_shape',
                                              'btagSF_shape_up_%s' % shift)

        alias = aliases['%sSF%sdown' % (targ, shift)] = copy.deepcopy(
            aliases['%sSF' % targ])
        alias['expr'] = alias['expr'].replace('btagSF_shape',
                                              'btagSF_shape_down_%s' % shift)

    aliases['btagSF%sup' % shift] = {
        'expr': aliases['btagSF']['expr'].replace('SF', 'SF' + shift + 'up'),
        'samples': mc
    }

    aliases['btagSF%sdown' % shift] = {
        'expr': aliases['btagSF']['expr'].replace('SF', 'SF' + shift + 'down'),
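
The loop above clones each nested alias dict with copy.deepcopy before rewriting its 'expr' for the up/down shifts, so the template alias is never modified in place. The same pattern in isolation, with simplified alias names and a made-up sample list standing in for mc:

import copy

mc = ['DY', 'top', 'WW']   # hypothetical sample list

aliases = {
    'bVetoSF': {'expr': 'bVeto*btagSF_shape', 'samples': mc},
}

# Generate up/down variants of the template without touching the original.
for shift in ['jes', 'lf', 'hf']:
    for direction in ['up', 'down']:
        variant = copy.deepcopy(aliases['bVetoSF'])
        variant['expr'] = variant['expr'].replace(
            'btagSF_shape', 'btagSF_shape_%s_%s' % (direction, shift))
        aliases['bVetoSF%s%s' % (shift, direction)] = variant

print(aliases['bVetoSFjesup']['expr'])   # bVeto*btagSF_shape_up_jes
print(aliases['bVetoSF']['expr'])        # template left unchanged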
Code example #28
File: status.py  Project: Haxine/mars-1
 def get_slots(self):
     return copy.deepcopy(self._slots)
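
Returning copy.deepcopy(self._slots) is a defensive copy: callers receive a snapshot they can modify freely without corrupting the internal slot bookkeeping. A tiny sketch of why that matters; the SlotHolder class is illustrative, not taken from mars:

import copy


class SlotHolder:
    def __init__(self):
        self._slots = {'cpu': [1, 2], 'io': [3]}

    def get_slots_unsafe(self):
        return self._slots                  # caller can mutate internal state

    def get_slots(self):
        return copy.deepcopy(self._slots)   # caller only gets a snapshot


holder = SlotHolder()
holder.get_slots_unsafe()['cpu'].append(99)
print(holder.get_slots()['cpu'])   # [1, 2, 99] -- internal state was corrupted

snapshot = holder.get_slots()
snapshot['cpu'].append(123)
print(holder.get_slots()['cpu'])   # still [1, 2, 99] -- the deep copy protected it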
Code example #29
File: LOSResult.py  Project: mburger-stsci/nexoclom
    def simulate_data_from_inputs(self, scdata):
        """Given a set of inputs, determine what the spacecraft should see.
        Models should have already been run.
        
        **Outputs**
        """
        # If using a planet-fixed source map, need to set subsolarlon
        if ((self.inputs.spatialdist.type == 'surface map') and
            (self.inputs.spatialdist.coordinate_system == 'planet-fixed')):
            self.inputs.spatialdist.subsolarlon = scdata.subslong.median() * u.rad
        else:
            pass
        
        # This will work with fitted or non-fitted outputfiles
        self.outid, self.outputfiles, self.npackets, self.totalsource = self.inputs.search()
        if self.npackets == 0:
            raise RuntimeError('No packets found for these Inputs.')

        data = scdata.data
        search_results = self.search()
        iteration_results = []
        
        # Do this step only if we will need to compute any iteration results
        dist_from_plan = (self._data_setup(data)
                          if None in search_results.values()
                          else None)
        for outputfile, search_result in search_results.items():
            if search_result is None:
                # simulate the data
                output = Output.restore(outputfile)
                
                packets = copy.deepcopy(output.X)
                packets['radvel_sun'] = (packets['vy'] +
                                         output.vrplanet.to(self.unit / u.s).value)
                self.oedge = output.inputs.options.outeredge * 2
                
                # Will base shadow on line of sight, not the packets
                out_of_shadow = np.ones(len(packets))
                self.packet_weighting(packets, out_of_shadow, output.aplanet)
                
                # This sets limits on regions where packets might be
                tree = self._tree(packets[xcols].values)
                
                rad = pd.Series(np.zeros(data.shape[0]), index=data.index)
                npack = pd.Series(np.zeros(data.shape[0]), index=data.index,
                                  dtype=int)
                print(f'{data.shape[0]} spectra taken.')
                for i, spectrum in data.iterrows():
                    rad_, pack_ = self._spectrum_process(spectrum, packets, tree,
                                                         dist_from_plan[i])
                    rad.loc[i] = rad_
                    npack.loc[i] = pack_
                    
                    if len(data) > 10:
                        ind = data.index.get_loc(i)
                        if (ind % (len(data) // 10)) == 0:
                            print(f'Completed {ind + 1} spectra')
                
                iteration_ = {'radiance': rad,
                              'npackets': npack,
                              'totalsource': output.totalsource,
                              'outputfile': outputfile,
                              'out_idnum': output.idnum,
                              'query': scdata.query}
                iteration_result = IterationResult(iteration_)
                modelfile = self.save(iteration_result)
                iteration_result.modelfile = modelfile
                iteration_results.append(iteration_result)
            else:
                print(f'Using saved result {search_result[2]}')
                iteration_result = self.restore(search_result)
                iteration_result.model_idnum = search_result[0]
                iteration_result.modelfile = search_result[2]
                assert len(iteration_result.radiance) == len(data)
                iteration_results.append(iteration_result)

        # combine iteration_results
        self.modelfiles = {}
        for iteration_result in iteration_results:
            self.radiance += iteration_result.radiance
            self.modelfiles[iteration_result.outputfile] = iteration_result.modelfile
            
        # need model rate for this output
        model_rate = self.totalsource / self.inputs.options.endtime.value
        self.atoms_per_packet = 1e23 / model_rate
        self.radiance *= self.atoms_per_packet/1e3  # kR
        self.determine_source_rate(scdata)
        self.atoms_per_packet *= self.sourcerate.unit
        self.outputfiles = self.modelfiles.keys()
        
        print(self.totalsource, self.atoms_per_packet)
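
The copy.deepcopy(output.X) near the top of the loop protects the restored Output object: derived columns such as radvel_sun and the packet weighting are added to a private copy, so the saved model data is never modified. Assuming output.X is a pandas DataFrame (the surrounding pd.Series bookkeeping suggests it is), the pattern in isolation looks like this:

import copy

import numpy as np
import pandas as pd

# Stand-in for output.X: packet positions and velocities.
output_X = pd.DataFrame({'x': np.random.rand(5),
                         'vy': np.random.rand(5)})

packets = copy.deepcopy(output_X)              # pandas also offers output_X.copy(deep=True)
packets['radvel_sun'] = packets['vy'] + 10.0   # derived column; the offset is arbitrary here

assert 'radvel_sun' not in output_X.columns    # the restored data stays untouched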
Code example #30
'''
if TestActive:
    for i in range(12):
        x,y,t=getViconPos(inds[i])
        posx[inds2[i]]=x
        posy[inds2[i]]=y
'''	
rospy.init_node('zumo_go', anonymous=True)  # zumo_go is a node
global tfl
tfl = tf.TransformListener()

pub = rospy.Publisher('/ZumoRefs', String, queue_size=1000)  # ZumoRefs is a topic name

#one zipper merge starting positions
t1 = Thread(target = zumoThread, args = (index1, name1, 0.05, copy.deepcopy(Path.GetDefaultPath(39))))
t1.start()
t2 = Thread(target = zumoThread, args = (index2, name2, 0.05, copy.deepcopy(Path.GetDefaultPath(39))))
t2.start()
t3 = Thread(target = zumoThread, args = (index3, name3, 0.05, copy.deepcopy(Path.GetDefaultPath(39))))
t3.start()
t4 = Thread(target = zumoThread, args = (index4, name4, 0.05, copy.deepcopy(Path.GetDefaultPath(39))))
t4.start()


#t5 = Thread(target = zumoThread, args = (index5, name5, 0.05, copy.deepcopy(Path.GetDefaultPath(45))))
#t5.start()
t5 = Thread(target = zumoThread, args = (index6, name6, 0.05, copy.deepcopy(Path.GetDefaultPath(48))))
t5.start()
t6 = Thread(target = zumoThread, args = (index7, name7, 0.05, copy.deepcopy(Path.GetDefaultPath(48))))
t6.start()
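
Each thread above receives its own copy.deepcopy(Path.GetDefaultPath(...)), so robots assigned the same default path never share one mutable path object. The same pattern stripped of the ROS specifics, with a hypothetical worker and a plain list standing in for the path:

import copy
from threading import Thread

default_path = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0)]   # stand-in for Path.GetDefaultPath(39)


def follow_path(name, path):
    path.append((2.0, 1.0))   # each worker may extend or edit its own copy
    print(name, len(path))


threads = [Thread(target=follow_path, args=('zumo%d' % i, copy.deepcopy(default_path)))
           for i in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()

print(len(default_path))   # still 3: the shared template was never touched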