Example 1
def gradient(theta):
    pdb.set_trace()
    write_learned_features(theta)
    print 'getting counts...'
    exp_counts = [fst.LogWeight.ZERO] * (len(f_names) + 1)
    obs_counts = [fst.LogWeight.ZERO] * (len(f_names) + 1)
    for idx, obs_trellis_file in enumerate(obs_machines):
        sys.stdout.write('%d \r' % idx)
        sys.stdout.flush()
        obs = fst.read(path + obs_trellis_file)
        #obs_c = fst.read(path + obs_chain_file)
        obs_trellis = apply_weights(obs, theta)
        obs_trellis = renormalize(obs_trellis)
        e_counts, o_counts = get_counts(obs_trellis)
        exp_counts = accumilate_counts(e_counts, exp_counts)
        obs_counts = accumilate_counts(o_counts, obs_counts)

    grad = np.zeros(len(theta))
    for i, o in f_names:
        k = f_names[i, o]
        ok = obs_counts[k]
        ek = exp_counts[k]
        # gradient = exp(-observed) - exp(-expected); the negative-log
        # weights are converted back to probabilities via exp(-w)
        s1 = expm(-float(ok))
        s2 = expm(-float(ek))
        grad[k] = s1 - s2
        #print grad[k], '=', s2, '-', s1, i, o
        #pdb.set_trace()
    print '\ngrad computed'
    return grad
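The accumilate_counts helper is not shown in this example. Below is a minimal sketch of what such an accumulator could look like, assuming the counts are negative-log weights in OpenFst's log semiring; the element-wise signature is inferred from the call sites above:

import numpy as np

def accumilate_counts(new_counts, totals):
    # hypothetical sketch: log-semiring "plus" of two weight lists;
    # for negative-log weights, a (+) b = -log(exp(-a) + exp(-b)),
    # which is -logaddexp(-a, -b)
    return [-np.logaddexp(-float(a), -float(b))
            for a, b in zip(new_counts, totals)]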
Example 2
    def compute_dr_wrt(self, wrt):
        result = ProjectPoints.compute_dr_wrt(self, wrt)
        if result is None:
            return None

        if sp.issparse(result):
            drz = self.z_coords.dr_wrt(wrt).tocoo()
            result = result.tocoo()
            result.row = result.row * 3 // 2  # floor division, as in the original Python 2 `*3/2`
            
            IS = np.concatenate((result.row, drz.row*3+2))
            JS = np.concatenate((result.col, drz.col))
            data = np.concatenate((result.data, drz.data))
            
            result = sp.csc_matrix((data, (IS, JS)), shape=(self.v.r.size, wrt.r.size))
        else:
            try:
                bigger = np.zeros((result.shape[0] // 2, 3, result.shape[1]))
                bigger[:, :2, :] = result.reshape((-1, 2, result.shape[-1]))
                drz = self.z_coords.dr_wrt(wrt)
                if drz is not None:
                    if sp.issparse(drz):
                        drz = drz.todense()
                    bigger[:,2,:] = drz.reshape(bigger[:,2,:].shape)

                result = bigger.reshape((-1, bigger.shape[-1]))
            except:
                import pdb; pdb.set_trace()

        return result
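The row arithmetic in the sparse branch is easy to miss: result.row*3/2 (integer division under Python 2) maps the interleaved (x, y) rows [0, 1, 2, 3, ...] onto [0, 1, 3, 4, ...], leaving every row of the form 3k+2 free for the z derivatives inserted at drz.row*3+2. A standalone sketch of that mapping:

import numpy as np

xy_rows = np.arange(6)           # point k occupies rows 2k and 2k+1
print(xy_rows * 3 // 2)          # [0 1 3 4 6 7]: slots 2, 5, 8 stay free
z_rows = np.arange(3) * 3 + 2    # [2 5 8], the rows taken by drz
print(z_rows)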
Example 3
def calculateFullList(data_set, data_label, features_num, fn, jump=1):
	doc = []
	prev_id = -1
	for i in range(len(data_set)):
		if not data_set[i][0] == prev_id:
			if not (int(data_set[i][0]) % jump == 0):
				continue
			if doc:
				writeOneList(doc, data_label[int(prev_id)],  fn)
			prev_id = data_set[i][0]
			doc = [ '0' for j in range(features_num) ]
			doc[int(data_set[i][1]) - 1] = data_set[i][2]
			# import pdb; pdb.set_trace()
			# doc[features_num] = data_label[int(data_set[i][0])]
			# tmp_lst = [ '0' for i in range(features_num + 1) ]
			# docs[data_set[i][0]] = tmp_lst
			# docs[data_set[i][0]][int(data_set[i][1])] = data_set[i][2]
			# docs[data_set[i][0]][features_num] = data_label[int(data_set[i][0])]
		else:
			try:
				doc[int(data_set[i][1]) - 1] = data_set[i][2]
			except:
				print "An error has occured and we have entered debug mode."
				import pdb; pdb.set_trace()
	writeOneList(doc, data_label[int(prev_id)], fn)
Example 4
def updates():
	pdb.set_trace()
	d = request.method
	form = request.form
	coords = form.keys()[0][1:-1].split(",")
	lat = float(coords[0])
	lon = float(coords[1])
	return 'll'
Example 5
 def z_coords(self):
     
     try:
         assert(self.v.r.shape[1]==3)
         return RigidTransform(v=self.v, rt=self.rt, t=self.t)[:,2]
     except:
         import pdb; pdb.set_trace()
Example 6
 def __init__(self, type, owner=None, index=None, name=None):
     super(TensorVariable, self).__init__(type, owner=owner,
                                          index=index, name=name)
     if (config.warn_float64 != 'ignore' and type.dtype == 'float64'):
         msg = ('You are creating a TensorVariable '
                'with float64 dtype. You requested an action via '
                'the Theano flag warn_float64={ignore,warn,raise,pdb}.')
         if config.warn_float64 == "warn":
             # Get the user stack. We don't want function inside the
             # tensor and gof directory to be shown to the user.
             x = tb.extract_stack()
             nb_rm = 0
             while x:
                 file_path = x[-1][0]
                 rm = False
                 for p in ["theano/tensor/",
                           "theano/gof/"]:
                     if p in file_path:
                         x = x[:-1]
                         nb_rm += 1
                         rm = True
                         break
                 if not rm:
                     break
             warnings.warn(msg, stacklevel=1 + nb_rm)
         elif config.warn_float64 == "raise":
             raise Exception(msg)
         elif config.warn_float64 == 'pdb':
             import pdb;pdb.set_trace()
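The stack-trimming loop exists so that warnings.warn reports the warning against the user's own code rather than a frame inside theano/tensor or theano/gof: stacklevel=1 points at the warn call itself, and each extra level walks one frame up the call stack, hence 1 + nb_rm. A minimal, self-contained illustration of the mechanism:

import warnings

def library_helper():
    # stacklevel=2 attributes the warning to library_helper's caller,
    # the same effect the loop above achieves with 1 + nb_rm
    warnings.warn("float64 requested", stacklevel=2)

def user_code():
    library_helper()  # the warning is reported against this line

user_code()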
Example 7
def main():

  parser = argparse.ArgumentParser(description='Fetch Annotation metadata')
  parser.add_argument('host', action="store", help='HostName')
  parser.add_argument('token', action="store", help='Token')
  parser.add_argument('processes', action="store", type=int, help='Processes')
  
  global result 
  result = parser.parse_args()
  
  # Establishing the connection to the database. For now hardcoded to dsp062
  db = MySQLdb.connect(host="dsp062.pha.jhu.edu", user=ocpcaprivate.dbuser, passwd=ocpcaprivate.dbpasswd, db="MP4")
  cur = db.cursor()
  cur.execute("Select annoid from annotations")
  
  idList = []

  # Extracting the ids from the sql database
  for row in cur.fetchall():
    idList.append(int(row[0]))
  
  pdb.set_trace()
  # Launching the Process Pool
  try:
    pool = mp.Pool(result.processes)
    pool.map( ReadAnno, idList)
  except KeyboardInterrupt:
    print " Caught KeyboardInterrupt, terminating workers"
    pool.terminate()
    pool.join()
  else:
    print "Quitting Normally"
    pool.close()
    pool.join()
Example 8
        def plot_grade(self):
            grade_data = []
            for i in range(len(self.route_waypoints)):
                wp1 = self.route_waypoints[i-1]  # i-1 wraps to the last waypoint when i == 0 (loop route)
                wp2 = self.route_waypoints[i]

                rise = wp2.elevation - wp1.elevation
                run = haversine(wp1, wp2)

                grade_data.append(100 * rise / run)
            pdb.set_trace()

            fig, ax = plt.subplots()
            Ntotal = len(grade_data)
            N, bins, patches = ax.hist(grade_data, Ntotal)

            #I'll color code by height, but you could use any scalar

            # we need to normalize the data to 0..1 for the full
            # range of the colormap
            fracs = N.astype(float)/N.max()
            norm = colors.Normalize(fracs.min(), fracs.max())

            for thisfrac, thispatch in zip(fracs, patches):
                color = cm.jet(norm(thisfrac))
                thispatch.set_facecolor(color)

            plt.show()
Example 9
    def choose_workflow(self, dx_environment_json, develop):

        # Determine appropriate workflow based on required operations
        operations = ['bcl2fastq', 'qc']    # Default operations for all analyses; 'release' is added only when requested
        if self.reference_genome_dxid and self.reference_index_dxid:
            operations.append('bwa')
        if self.release:
            operations.append('release')

        if develop:
            workflows = dx_environment_json['development_workflows']
        else:
            workflows = dx_environment_json['production_workflows']

        for workflow_name in workflows:
            workflow = workflows[workflow_name]
            # pdb.set_trace()
            if set(operations) == set(workflow['operations']):
                self.workflow_name = workflow_name
                self.workflow_id = workflow['id']
                self.workflow_project_id = workflow['project_id']
                self.workflow_json_file = workflow['json_file']
           
                #pdb.set_trace() 
                print "Choosing workflow: %s" % workflow
                return workflow
        print "Error: Could not choose workflow"
        pdb.set_trace()
Example 10
 def gen_vocab(self):
     '''
     Generate vocabulary from the document stream
     '''
     doc_stream = (tokens for tokens in self.doc_stream_gen())
     self.id2word = gensim.corpora.Dictionary(doc_stream)
     pdb.set_trace()
Example 11
def main():
        filter=Filter()
        filter.readfile()
        while True:
                checkpair=raw_input("Please Input 2 authors' id: ")
                checkpair=checkpair.split()
                print "------------------------------------------------------------------------------------------------"

                if filter.query_aff(checkpair) :
                        print "Affiliation True: ",filter.authoraff[checkpair[0]] & filter.authoraff[checkpair[1]]
                        print " "
                else :
                        print "Affiliation False: "
                        if filter.authoraff.has_key(checkpair[0]):
                                print checkpair[0]," : ",filter.authoraff[checkpair[0]]
                        if filter.authoraff.has_key(checkpair[1]):
                                print checkpair[1]," : ",filter.authoraff[checkpair[1]]
                        print " "

                if filter.query_keyword(checkpair) :
                        print "keyword True: ",filter.authorkeywords[checkpair[0]] & filter.authorkeywords[checkpair[1]]
                        print " "
                else :
                        print "keyword False: "
                        if filter.authorkeywords.has_key(checkpair[0]):
                                print checkpair[0],"        ",filter.authorkeywords[checkpair[0]]
                        if filter.authorkeywords.has_key(checkpair[1]):
                                print checkpair[1],"        ",filter.authorkeywords[checkpair[1]]
                        print " "


                pdb.set_trace()
Example 12
	def parse(self, response) :
		print ">>>>>", response.request.url 
		sel = Selector (response)
		items=[]
		item =  AmazonItem()
		item['url'] = response.request.url
		#import pdb;pdb.set_trace()
		#item['href'] = sel.xpath('//div[@class="a-row a-spacing-none"]/a[@class="a-link-normal a-text-normal"]/@href').extract()
		title = str(list(map(unicode.strip,sel.xpath('//span[@id="productTitle"]/text()').extract())))
		title = title.replace("u'","").replace("[","").replace("]","")
		item['title'] = title
		brand = str(list(map(unicode.strip,sel.xpath('//a[@id="brand"]/text()').extract())))
		brand = brand.replace("u'","").replace("[","").replace("]","")
		item['brand'] = brand
		import pdb;pdb.set_trace()
		price = str(list(map(unicode.strip,sel.xpath('//span[@class="olp-padding-right"]//span[@class="a-color-price"]//text()').extract())))
		price = price.replace("Rs.","").replace("[","").replace("]","").replace("u'","").replace(",","").replace("'","").replace("$","")
		item['price'] = float(price)*67.30
		desc = str(list(map(unicode.strip,sel.xpath('//div/ul[@class="a-vertical a-spacing-none"]/li//text()').extract())))
		desc = desc.replace("[","").replace("]","").replace("u'","")
		item['desc'] = desc
		category = str(list(map(unicode.strip,sel.xpath('//div[@data-feature-name="wayfinding-breadcrumbs"]/ul/li//a//text()').extract())))
		category = category.replace("u'","").replace("[","").replace("]","").replace('"','').replace("'","")
		item['category'] = category
		if item['title'] and item['category'] and item['price']:
			return item
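The str(list(map(unicode.strip, ...))) pattern above turns a Python list into its repr string and then scrubs the brackets and u'' prefixes back out with chained replace() calls. A sketch of a more direct approach, joining the stripped text nodes instead (the hard-coded parts list is a stand-in for sel.xpath(...).extract()):

parts = [u'  Apple iPhone ', u'(64GB)\n']          # stand-in for extract()
title = u' '.join(p.strip() for p in parts if p.strip())
print(title)                                       # Apple iPhone (64GB)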
Example 13
    def _find_reference_cycle(self):
        # Shorthand variables, useful if attached with PDB.
        # g = "unfetchable objects"
        # i2o = "id to object, for objects in `g`"
        # gr = "get referrers of an object in the unfetchable objects"
        # gri = "get the id of the above referrers"
        g = gc.garbage
        i2o = {id(o): o for o in g}
        gr = lambda o: [r for r in gc.get_referrers(o) if id(r) in i2o]
        gri = lambda o: [id(r) for r in gr(o)]
        
        # Find a loop by walking unfetched objects, stepping to an arbitrary
        # referrer each time. When an object that has already been encountered
        # is encountered again a loop has been found.
        #
        # The loop is described in terms of object ids, to avoid having to
        # invoke objects' __eq__ method.
        def find_loop(start_idx=0):
            path = [id(g[start_idx])]
            while True:
                path.append(gri(i2o[path[-1]])[0])
                # This check could be made more efficient using a set to track
                # elements in `path`.
                if path[-1] in path[:-1]:
                    return path[path.index(path[-1]):]

        loop = find_loop()
        logger.error("Reference cycle of size {} found:".format(len(loop) - 1))
        for obj_id in loop:
            logger.error("    {!r} (id: {})".format(i2o[obj_id], obj_id))

        if _DEBUG_REFERENCE_CYCLES:
            loop = [i2o[o] for o in loop]
            import pdb
            pdb.set_trace()
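Note that gc.garbage is only populated for collectable cycles when the collector is told to save them. A sketch of how one might arrange that before calling _find_reference_cycle (DEBUG_SAVEALL keeps every unreachable object in gc.garbage instead of freeing it):

import gc

gc.set_debug(gc.DEBUG_SAVEALL)   # unreachable objects go to gc.garbage

class Node(object):
    pass

a, b = Node(), Node()
a.other, b.other = b, a          # build a two-object reference cycle
del a, b
gc.collect()                     # the cycle now sits in gc.garbage
print(len(gc.garbage))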
Example 14
def failnbail_transformation(msg):
    msg = '%s\n' % msg
    if we_are_translated():
        llop.debug_print(lltype.Void, msg)
    else:
        import pdb; pdb.set_trace()
    raise NotImplementedError(msg)
Example 15
 def save(self, commit=True):
     import pdb;pdb.set_trace()
     user = super(UserForm, self).save(commit=False)
     user.set_password(self.cleaned_data['password'])
     if commit:
         user.save()
     return user
Example 16
def check_TYT_and_mishnayot():
	how_many = 0
	issues = open("issues", 'w')
	num_TYTs, actual_TYTs = get_num_TYTs_per_perek()
	num_mishnayot = get_num_mishnayot_per_perek()
	for masechet in sorted(num_TYTs.keys()):
		print masechet
		for perek in range(len(num_TYTs[masechet])):
			if (masechet + str(perek)) in alreadyFound:
				continue
			mishnah = num_mishnayot[masechet][perek]
			TYT = num_TYTs[masechet][perek]
			try:
				if TYT != mishnah:
					how_many += 1
					alreadyFound[masechet + str(perek)] = True
					issues.write(masechet+", perek "+str(perek+1)+", mishnah_count "+str(mishnah)+", TYT count "+str(TYT)+"\n")
					issues.write("The Tosafot Yom Tov tags are the following: \n")
					for each_one in actual_TYTs[masechet][perek]:
						issues.write(each_one.encode('utf-8')+", ")
					issues.write("\n\n\n\n")
			except:
				pdb.set_trace()
	print how_many
	issues.close()
Example 17
 def transmit(self):
     if self.sequence is None:
         print('cannot send undefined sequence to channel')
         pdb.set_trace()
     super().transmit()
     for sock in self.clients:
         pickle_send(sock, self.sequence)
Example 18
    def gexpect(self, tag):
        log.info('GAME_EXPECT: %s', repr(tag))
        if not self.gdlist:
            log.info('Game data exhausted, exiting...')
            if options.catch:
                import pdb
                pdb.set_trace()
            sys.exit(0)

        glob = False
        if tag.endswith('*'):
            tag = tag[:-1]
            glob = True

        missed = False
        for i, d in enumerate(self.gdlist):
            if d[0] == tag or (glob and d[0].startswith(tag)):
                log.info('GAME_READ: %s', repr(d))
                del self.gdlist[i]
                return d
            if not missed:
                log.info('GAME_DATA_MISS: %s', repr(d))
                missed = True

        log.info('GAME_DATA_MISS!!')
        log.info('Remaining game data:')
        for i in self.gdlist:
            log.info(repr(i))

        sys.exit(1)
Example 19
    def v1_lookup_response_to_v3_get_response(self, v1_response, v3_response):
        v3_response.Clear()
        import pdb; pdb.set_trace()
        for entity_result_v1 in v1_response.found:
            v3_entity = v3_response.add_entity()

            self.v1_to_v3_entity(entity_result_v1.entity, v3_entity.mutable_entity())
Example 20
    def get_data_from_archive(problems, algorithms, Configurations):
        from PerformanceMeasures.DataFrame import ProblemFrame
        problem_dict = {}
        for problem in problems:
            data = ProblemFrame(problem, algorithms)
            generation_dict = {}
            for generation in xrange(Configurations["Universal"]["No_of_Generations"]):
                population = data.get_frontier_values(generation)
                evaluations = data.get_evaluation_values(generation)

                repeat_dict = {}
                for repeat in xrange(Configurations["Universal"]["Repeats"]):
                    algorithm_dict = {}
                    for algorithm in algorithms:
                        algorithm_dict[algorithm.name] = {}
                        try:
                            candidates = [jmoo_individual(problem, pop.decisions, pop.objectives) for pop in
                                          population[algorithm.name][repeat]]
                        except:
                            import pdb
                            pdb.set_trace()
                        if len(candidates) > 0:
                            algorithm_dict[algorithm.name]["Solutions"] = candidates
                            algorithm_dict[algorithm.name]["Evaluations"] = evaluations[algorithm.name][repeat]
                        else:
                            algorithm_dict[algorithm.name]["Solutions"] = None
                            algorithm_dict[algorithm.name]["Evaluations"] = None

                    repeat_dict[str(repeat)] = algorithm_dict
                generation_dict[str(generation)] = repeat_dict
            problem_dict[problem.name] = generation_dict
        return problem_dict
Example 21
    def parseFDOutput(self, fdStr, planCount):
        "Planner mode for parsing: FD"
        relevantPrefixStr = ""
        relevantSegment = ""
        stateList = []

        if not "Solution found!" in fdStr:
            print "Solution not found. Error"
            sys.exit(-1)

        if "End state list" in fdStr:
            stateListSegments = fdStr.split("End state list")
            print "Found " + repr(len(stateListSegments) - 1) + " state lists"
            print "using list #" + repr(planCount)
            relevantSegment = stateListSegments[planCount - 1].split("Begin state list")[1]
        else:
            print "Output from planner garbled"
            pdb.set_trace()

        # get pruned atoms
        prunedList = fdStr.partition("Translating task:")[0].split("\n")
        pruneLines = filter(lambda x: "pruned" in x and "=" not in x, prunedList)
        prunedFacts = [s.replace("pruned static init fact: Atom ", "") for s in pruneLines]
        constantState = self.getStateFromStr("\n".join(prunedFacts))

        for atomStateStr in relevantSegment.split(stateDelimiter):
            stateStr = atomStateStr.replace("Atom ", "").strip()
            if len(stateStr) > 0:
                s = self.getStateFromStr(stateStr)
                if s.size() > 0:
                    s.patch(constantState)
                    stateList.append(s)

        self.stateList = stateList
        return stateList
Example 22
 def cmd_debug(self, argv, help):
     """Prints some debug info for this script"""
     parser = argparse.ArgumentParser(
         prog="aws debug",
         description=help,
     )
     parser.add_argument("server", nargs=1,
                         metavar="instance",
                         help="Name of the instance from the config.",
                         choices=list(self.ec2.instances))
     parser.add_argument("-v", "--verbose", dest="verbose",
                       action="store_true", help="Print more info")
     parser.add_argument("-i", "--interactive", dest="interactive",
                       action="store_true", help="Creates a connection and drops you into pdb")
     parser.add_argument("-o", "--override", nargs="*", type=str,
                         dest="overrides", metavar="OVERRIDE",
                         help="Option to override server config for startup script (name=value).")
     args = parser.parse_args(argv)
     overrides = self._parse_overrides(args)
     server = self.ec2.instances[args.server[0]]
     opts = server.config.copy()
     opts.update(overrides)
     startup_script = server.startup_script(opts)
     log.info("Length of startup script: %s/%s", len(startup_script), 16*1024)
     if args.verbose:
         log.info("Startup script:")
         print startup_script,
     if args.interactive:
         conn = server.conn
         instance = server.instance
         conn, instance # shutup pyflakes
         from pdb import set_trace
         set_trace()
Example 23
 def _load_measures(self, tree):
     """
     Finds all measures in the Scenario by recursively scanning the parsed json and organizes them by type
     (i.e. table) and a "bucket_id", which is a tuple of subsector/node id and technology id. If the particular
     measure applies to a whole subsector/node rather than a specific technology, the second member of the
     bucket_id tuple will be None.
     """
     for key, subtree in tree.iteritems():
         if key.lower() == 'sensitivities':
             self._load_sensitivities(subtree)
         elif isinstance(subtree, dict):
             self._load_measures(subtree)
         elif key in self.MEASURE_CATEGORIES and isinstance(subtree, list):
             for measure in subtree:
                 try:
                     bucket_id = self._bucket_lookup[key][measure]
                 except KeyError:
                     raise ValueError("{} scenario wants to use {} {} but no such measure was found in the database.".format(self._id, key, measure))
                 if measure in self._measures[key][bucket_id]:
                     raise ValueError("Scenario uses {} {} more than once.".format(key, measure))
                 self._measures[key][bucket_id].append(measure)
         elif not isinstance(subtree, basestring):
             pdb.set_trace()
             raise ValueError("Encountered an uninterpretable non-string leaf node while loading the scenario. "
                              "The node is '{}: {}'".format(key, subtree))
Example 24
    def get(self):
        '''Get all quizzes'''
        print "_______________________________________________"
        print "QuizzesAPI get fn: %s" %(request)

        '''Query from quiz table'''
        Query_obj = qzdb.Quiz.query.order_by(qzdb.Quiz.qzid).all()
        import pdb; pdb.set_trace()
        if not Query_obj:
            response = handle_invalid_usage(InvalidUsage('Error: No quizzes found',status_code=404))
            return response

        '''Return response'''
        response_fields = [{'name':'qzid', 'relnshp':False,'subfields':None},
                           {'name':'title','relnshp':False,'subfields':None}, 
                           {'name':'difficulty_level','relnshp':False,'subfields':None}, 
                           {'name':'text','relnshp':False,'subfields':None}, 
                           {'name':'no_ques','relnshp':False,'subfields':None}] 
        quizzes = utls.serialize_to_json(response_fields, Query_obj)
        print "Json response"
        print "=============\n"
        print '{\'quizzes\':%s}\n' %(quizzes)
        response = jsonify(quizzes=quizzes)
        response.status_code = 200
        utls.display_tables()
        return response
Example 25
def main(**kwargs):
    model = Model(**kwargs)
    # model.findBestClustering()
    model.writeSegments(nClusters=5)
    import pdb

    pdb.set_trace()
Example 26
    def from_arrays(cls, workstate, sali, info_ind, dates=None):
        if isinstance(sali, DataFrame):
            assert isinstance(workstate, DataFrame)
            try:
                assert all(sali.index == workstate.index) and all(sali.index == info_ind.index)
            except:
                assert all(sali.index == workstate.index)
                assert len(sali) == len(info_ind)
                sal = sali.index
                idx = info_ind.index
                assert all(sal[sal.isin(idx)] == idx[idx.isin(sal)])
                # if we get stuck at this assert, it means the ordering has changed
                print(sal[~sal.isin(idx)])
                print(idx[~idx.isin(sal)])
                # an offset?
                decal = idx[~idx.isin(sal)][0] - sal[~sal.isin(idx)][0]
                import pdb
                pdb.set_trace()

            # TODO: should be done before
            assert sali.columns.tolist() == workstate.columns.tolist()
            assert sali.columns.tolist() == (sorted(sali.columns))
            dates = sali.columns.tolist()
            sali = array(sali)
            workstate = array(workstate)

        if isinstance(sali, ndarray):
            assert isinstance(workstate, ndarray)
            sali = TimeArray(sali, dates, name='sali')
            workstate = TimeArray(workstate, dates, name='workstate')

        assert in1d(info_ind['sexe'], [0, 1]).all()
        return PensionData(workstate, sali, info_ind)
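The except branch above is essentially an interactive recipe for diagnosing index misalignment between two pandas objects before dropping into pdb. The isin-based diff at its core can be reproduced standalone:

import pandas as pd

sal = pd.Index([1, 2, 3, 5])
idx = pd.Index([1, 2, 4, 5])
print(sal[~sal.isin(idx)])   # in sal but not in idx -> [3]
print(idx[~idx.isin(sal)])   # in idx but not in sal -> [4]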
Example 27
 def __call__(self, tag):
     u = xml2dict(tag)
     d = self.clean(u)
             
     try:
         if d is not None:
             try: #update (deleting from a RDBMS updates FK)
                 m = self.model.objects.get(pk=d['id'])
                 del d['id']
                 for k,v in d.items():
                     setattr(m, k, v)
                 m.save()             
             except self.model.DoesNotExist:
                 m = self.model(**d)
                 m.save()
             self.count += 1
             if self.count % 100 == 0:
                 print "saved %s items" % self.count
         else: #d is none (fail)
             self.fails += 1
             if self.fails % 10 == 0:
                 print "SKIPPED %s items" % self.fails
     
     except Exception as e:
         if DEBUG_ON_IMPORT_SAVE_ERROR:
             from pprint import pprint
             pprint(e)
             pprint(u)
             pprint(d)
             import pdb; pdb.set_trace()
         else:
             raise e
Example 28
    def __init__(self, exeCore):
        """onboard the test framework"""
        self.execore = exeCore
        import pdb;pdb.set_trace()
        self.perl_script = os.path.join(self.execore.productDir,'core/tstcore/tvm/resources','voip_testcalls.pl')
        self.test_group = "VoIPTest"
        args = {'network': 'tvm_comms', 'subnet_name': 'tvm_comms-sub', 'cidr': '192.168.99.0/24'}
        comms_nw_id = self.execore.createNetwork(**args)
        args = {'network': 'tvm_test', 'subnet_name': 'tvm_test-sub', 'cidr': '192.168.2.0/24'}
        test_nw_id = self.execore.createNetwork(**args)
        args = {'network': 'tvm_mgmt', 'subnet_name': 'tvm_mgmt-sub', 'cidr': '192.168.0.0/24'}
        mgmt_nw_id = self.execore.createNetwork(**args)
        #TODO: Router ID should be from config file instead of hardcoding
        args = {'router_name': 'Router', 'external_network_name': 'public'}
        router_id = self.execore.createRouter(**args)

        self.NSD = os.path.join(self.execore.productDir,'core/tstcore/tvm/descriptor','tvm.zip')
        self.templateID = self.execore.onBoard('teravm', self.NSD)

        csObj = self.execore.createServiceRequestObject(router='Router', networks= {'comms-network': comms_nw_id, 'test-network': test_nw_id, 'mgmt': mgmt_nw_id})
        csObj['name'] = 'Tvm'
        csObj['qos'] = 'Voip'
        tvm_srv = self.execore.createService(self.templateID, **csObj)
         
        self.local_path = self.execore.sessionDir
        self.result_filename = "tvm_results.zip"
        self.testCaseMap = { 'TEST_SIP_CALLS' : '__analyze_logs__',
                             'TEST_SIP_CALLS1' : '__analyze_logs1__'}
        super(testCore, self).__init__()
Example 29
def load_oggs(filename):
    ogg_db = sqlite3.connect(filename)
    ogg_cursor = ogg_db.execute('select img_name, length, img_size, img_user_text, img_timestamp, channels from audio_metadata')
    all_oggs = ogg_cursor.fetchall()
    unique_users = Counter([o[3] for o in all_oggs])
    
    import pdb;pdb.set_trace()
Example 30
    def get_game_input(self, key):
        if key.pressed:
            if key.lctrl and key.rctrl and key.c == ord('d'):
                Game.debug = not Game.debug
            if key.vk == libtcod.KEY_F11:
                Game.fullscreen = not Game.fullscreen
                self.bring_up_root()
            if key.vk == libtcod.KEY_F5:
                Game.toggle_sidebar()
        if Game.debug:
            if key.pressed:
                mod = key.lctrl
                if mod and key.c == ord('c'):
                    Game.collidable = not Game.collidable
                    print('toggle collision: {}'.format(Game.collidable))
                # TODO get working again
                if mod and key.c == ord('-') and Game.fps > 0:
                    Game.fps -= 1
                    print("fps: %d" % Game.fps)
                    libtcod.sys_set_fps(type(self).fps)
                if mod and key.c == ord('='):
                    Game.fps += 1
                    print("fps: %d" % Game.fps)
                    libtcod.sys_set_fps(type(self).fps)
                if mod and key.c == ord('0'):
                    Game.fps = 0
                    print("fps unlimited")
                    libtcod.sys_set_fps(type(self).fps)
                if mod and key.vk == libtcod.KEY_1:
                    Game.fps = Game.default_fps
                    print("fps default: %d" % Game.default_fps)
                    libtcod.sys_set_fps(type(self).fps)
                if mod and key.vk == libtcod.KEY_2:
                    Game.fps += Game.default_fps
                    print("fps: %d" % Game.fps)
                    libtcod.sys_set_fps(type(self).fps)

                CTRL_R_BRACKET = 29
                CTRL_L_BRACKET = 27
                if key.c == CTRL_L_BRACKET and key.lctrl and Game.loaded_block_radius > 0:
                #if key.c == ord("[") and key.lalt and Game.loaded_block_radius > 0:
                    Game.loaded_block_radius -= 1
                    print("loaded block radius: %d" % Game.loaded_block_radius)
                if key.c == CTRL_R_BRACKET and key.lctrl:
                    Game.loaded_block_radius += 1
                    print("loaded block radius: %d" % Game.loaded_block_radius)

                font_changed = False
                if mod and key.c == ord('-') and self.font_handler.decrease_font():
                    font_changed = True
                if mod and key.c == ord('=') and self.font_handler.increase_font():
                    font_changed = True
                if font_changed:
                    print('set font')
                    libtcod.console_delete(0)
                    self.bring_up_root()

                if key.shift and mod and key.c == ord('d'):
                    import pdb
                    pdb.set_trace()
Example 31
def d():
    pdb.set_trace()
Example 32
    return query(buildRequest(vars()))


# Transaction
def TransactionLookup(
    TransactionId, ResponseGroup=None, AWSAccessKeyId=None, AssociateTag=None
):
    """TransactionLookup in AWS"""
    return rawIterator(
        XMLTransactionLookup, vars(), "Transactions", __plugins["TransactionLookup"]
    )


def XMLTransactionLookup(
    TransactionId, ResponseGroup=None, AWSAccessKeyId=None, AssociateTag=None
):
    """DOM representation of TransactionLookup in AWS"""

    Operation = "TransactionLookup"
    return query(buildRequest(vars()))


if __name__ == "__main__":
    setLicenseKey("1MGVS72Y8JF7EC7JDZG2")
    ItemSearch(
        "XML Python", SearchIndex="Books", MerchantId="All", ResponseGroup="OfferFull"
    )
    import pdb

    pdb.set_trace()
Example 33
    def raceWatcher(self, task):
        kart = base.cr.doId2do.get(self.kartMap.get(localAvatar.doId, None), None)
        if self.localKart.amIClampingPosition():
            self.notify.debug('teleporting kart %d back to main track' % localAvatar.doId)
            self.localKart.setPos(self.curvePoints[self.currentPole])
        kartPoint = self.localKart.getPos()
        direction = 0
        while True:
            currPoint = self.curvePoints[self.currentPole]
            nextPole = (self.currentPole + 1) % len(self.curvePoints)
            nextPoint = self.curvePoints[nextPole]
            segment = nextPoint - currPoint
            segment.setZ(0)
            segLength2 = segment.lengthSquared()
            kartVector = kartPoint - currPoint
            kartVector.setZ(0)
            project = segment * (segment.dot(kartVector) / segLength2)
            projLength2 = project.lengthSquared()
            if project.dot(segment) < 0:
                if direction == 1:
                    break
                prevPole = (self.currentPole - 1) % len(self.curvePoints)
                self.currentPole = prevPole
                direction = -1
            elif projLength2 > segLength2:
                if direction == -1:
                    break
                self.currentPole = nextPole
                direction = 1
            else:
                break

        if self.dummyNode:
            self.dummyNode.setPos(kartPoint[0], kartPoint[1], 0)
            self.dummyNode.setHpr(self.localKart.getH(), 0, 0)
        t = projLength2 / segLength2
        if self.debugIt:
            self.notify.debug('self.debugIt = %d' % self.debugIt)
            import pdb
            pdb.set_trace()
        if nextPole < self.currentPole:
            newT = self.curveTs[self.currentPole] * (1 - t) + self.curve.getMaxT() * t
        else:
            newT = self.curveTs[self.currentPole] * (1 - t) + self.curveTs[nextPole] * t
        kartDirection = self.localKart.forward.getPos(render) - self.localKart.getPos(render)
        kartDirection.normalize()
        project.normalize()
        globalDirection = kartDirection.dot(project)
        if globalDirection < 0:
            self.wrongWay = True
        elif globalDirection > 0.1:
            self.wrongWay = False
        newLapT = (newT - self.startT) / self.curve.getMaxT() % 1.0
        if newLapT - self.currLapT < -0.5:
            self.laps += 1
            self.changeMusicTempo(1 + self.laps * 0.5)
            self.notify.debug('crossed the start line: %s, %s, %s, %s' % (self.laps,
             self.startT,
             self.currT,
             newT))
        elif newLapT - self.currLapT > 0.5:
            self.laps -= 1
            self.changeMusicTempo(1 + self.laps * 0.5)
            self.notify.debug('crossed the start line - wrong way: %s, %s, %s, %s' % (self.laps,
             self.startT,
             self.currT,
             newT))
        self.currT = newT
        self.currLapT = newLapT
        if self.isUrbanTrack:
            self.showBuildings(self.currT)
        now = globalClock.getFrameTime()
        timestamp = globalClockDelta.localToNetworkTime(now)
        if self.laps == self.lapCount:
            self.sendUpdate('heresMyT', [localAvatar.doId,
             self.laps,
             self.currLapT,
             timestamp])
            self.fsm.request('finished')
        if self.laps > self.maxLap:
            self.maxLap = self.laps
            self.sendUpdate('heresMyT', [localAvatar.doId,
             self.laps,
             self.currLapT,
             timestamp])
        if now - self.lastTimeUpdate > 0.5:
            self.lastTimeUpdate = now
            self.sendUpdate('heresMyT', [localAvatar.doId,
             self.laps,
             self.currLapT,
             timestamp])
        self.gui.updateRacerInfo(localAvatar.doId, curvetime=self.currLapT + self.laps)
        self.gui.update(now)
        return Task.cont
Example 34
def myself_2(request):
    print(request)
    import pdb
    pdb.set_trace()
    return HttpResponse("YES!")
Example 35
 def _is_converged(self, present_loss, previous_loss, loop_idx):
     try:
         # evaluated once only to trigger a RuntimeWarning (e.g. a zero
         # division) before the same expression is returned below
         (previous_loss - present_loss) / np.abs(present_loss) < self.convergence_threshold
     except RuntimeWarning:
         pdb.set_trace()
     return (previous_loss - present_loss) / np.abs(present_loss) < self.convergence_threshold
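As written, the except RuntimeWarning clause only fires if warnings have been escalated to exceptions; by default numpy merely prints a RuntimeWarning on a bad division and execution continues. A sketch of the configuration this code appears to assume:

import warnings
import numpy as np

# escalate RuntimeWarning (e.g. 0/0 in the convergence ratio) into a
# real exception so an `except RuntimeWarning` clause can catch it
warnings.simplefilter('error', RuntimeWarning)

try:
    ratio = (1.0 - 1.0) / np.abs(0.0)   # 0/0 now raises RuntimeWarning
except RuntimeWarning as w:
    print('caught:', w)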
Example 36
    def text_preproc(self, file_list):
        '''
        Create the following:

        (can get from the sentence file alone)
        self.utt_ids
        self.utt2spk
        self.tok2spk
        self.tok2utt
        self.utt2toks

        (can get from terminal file alone)
        self.tok2times
        self.tok2tokstr

        (have to use info from both)
        self.utt2text
        self.utt2startend
        self.utt2tokentimes
        self.utt2frames

        (annotation files)
        self.tok2tone
        self.utt2tones
        self.tok2infostat
        '''
        pw2term = {}
        term2pw = {}

        for file in file_list:

            conversation, speaker, _, _ = file.strip().split('.')
            print(conversation)
            # Map from terminals to phonwords
            term_path = os.path.join(
                self.swbd_dir, 'terminals',
                '.'.join([conversation, speaker, 'terminals', 'xml']))
            term_file = open(term_path, "r")
            term_contents = term_file.read()
            term_soup = BeautifulSoup(term_contents, 'lxml')
            terminals = term_soup.find_all('word')
            for terminal in terminals:
                terminal_num = terminal['nite:id']
                start = terminal['nite:start']
                pos = terminal['pos']
                if not start == 'non-aligned':
                    term_id = SwbdPreprocessor.get_id(conversation,
                                                      terminal_num)
                    phonwords = terminal.find_all('nite:pointer')
                    if phonwords:
                        pw_num = SwbdPreprocessor.extract_id_from_href(
                            phonwords[0]['href'])
                        pw_id = SwbdPreprocessor.get_id(conversation, pw_num)
                        pw2term[pw_id] = term_id
                        term2pw[term_id] = pw_id
                        self.tok2pos[pw_id] = pos

            # Put all the phonwords in the appropriate dictionaries. Ignore terminals that are not aligned with phonwords
            pw_path = os.path.join(
                self.swbd_dir, 'phonwords',
                '.'.join([conversation, speaker, 'phonwords', 'xml']))
            pw_file = open(pw_path, 'r')
            pw_contents = pw_file.read()
            pw_soup = BeautifulSoup(pw_contents, 'lxml')
            pws = pw_soup.find_all('phonword')
            for pw in pws:
                pw_num = pw['nite:id']
                pw_id = SwbdPreprocessor.get_id(conversation, pw_num)

                if pw_id in pw2term:
                    self.tok2tokstr[pw_id] = SwbdPreprocessor.text_reg(
                        pw['orth'])
                    self.tok2times[pw_id] = (float(pw['nite:start']),
                                             float(pw['nite:end']))

                    if pw_id in self.correction:
                        self.tok2times[pw_id] = (float(self.correction[pw_id]),
                                                 float(pw['nite:end']))
                    self.tok2conv[pw_id] = conversation
                    if conversation in self.conv2tok:
                        self.conv2tok[conversation].append(pw_id)
                    else:
                        self.conv2tok[conversation] = [pw_id]

            syntax_path = os.path.join(
                self.swbd_dir, 'syntax',
                '.'.join([conversation, speaker, 'syntax', 'xml']))
            syn_file = open(syntax_path, "r")
            syn_contents = syn_file.read()
            syn_soup = BeautifulSoup(syn_contents, 'lxml')
            sentences = syn_soup.find_all('parse')

            # Open the sentence file, and pull out all the sentences and tokens in those sentences
            for sentence in sentences:
                sentence_num = sentence['nite:id']
                utt_id = SwbdPreprocessor.get_id(conversation, sentence_num)
                sentence_terminals = sentence.find_all('nite:child')
                for terminal in sentence_terminals:
                    terminal_num = SwbdPreprocessor.extract_id_from_href(
                        terminal['href'])
                    term_id = SwbdPreprocessor.get_id(conversation,
                                                      terminal_num)
                    if term_id in term2pw:
                        pw_id = term2pw[term_id]
                        if utt_id in self.utt2toks:
                            gap = self.tok2times[pw_id][0] - self.tok2times[
                                self.utt2toks[utt_id][-1]][-1]
                            if gap > self.gap_threshold:
                                utt_id = self.utt_id_incr(utt_id)
                        if conversation in self.conv2utt:
                            if not utt_id == self.conv2utt[conversation][-1]:
                                self.conv2utt[conversation].append(utt_id)
                        else:
                            self.conv2utt[conversation] = [utt_id]

                        self.utt2spk[utt_id] = speaker
                        self.utt2conv[utt_id] = conversation

                        if pw_id in self.tok2tokstr:  # check that it's a word, not a silence or a contraction
                            self.tok2spk[pw_id] = speaker
                            if utt_id in self.utt2toks:
                                self.utt2toks[utt_id].append(pw_id)
                            else:
                                self.utt2toks[utt_id] = [pw_id]
                            self.tok2utt[pw_id] = utt_id

            # Now find infostatus for tokens

            # First use the syntax file to map non-terminals to terminals
            nt2term = {}
            syn_non_terminals = syn_soup.find_all('nt')
            for nt in syn_non_terminals:
                nt_id = nt['nite:id']
                terms = nt.find_all('nite:child')
                for term in terms:
                    term_num = SwbdPreprocessor.extract_id_from_href(
                        term['href'])
                    term_id = SwbdPreprocessor.get_id(conversation, term_num)
                    if nt_id in nt2term:
                        nt2term[nt_id].append(term_id)
                    else:
                        nt2term[nt_id] = [term_id]

            # Then go through the markables files and assign the infostat to terminals
            infostruc_path = os.path.join(
                self.swbd_dir, 'markable',
                '.'.join([conversation, speaker, 'markable', 'xml']))
            infostruc_file = open(infostruc_path, 'r')
            infostruc_contents = infostruc_file.read()
            infostruc_soup = BeautifulSoup(infostruc_contents, 'lxml')
            markables = infostruc_soup.find_all('markable')
            for markable in markables:
                try:
                    infostat = markable['status']
                    if not infostat in self.allowed_infostats:
                        infostat = None
                except:
                    infostat = None
                syn_nt = SwbdPreprocessor.extract_id_from_href(
                    markable.find_all('nite:pointer')[0]['href'])
                if syn_nt in nt2term:
                    for term in nt2term[syn_nt]:
                        if term in term2pw:
                            if term2pw[term] in self.tok2utt:
                                self.tok2infostat[term2pw[term]] = infostat
                elif syn_nt in term2pw:  # sometimes the markable is marked on a terminal, not a non-terminal
                    if term2pw[syn_nt] in self.tok2utt:
                        self.tok2infostat[term2pw[
                            syn_nt]] = infostat  # added condition that tok has to be in tok2utt
            for tok in self.conv2tok[conversation]:
                if tok not in self.tok2infostat and tok in self.tok2utt:  # added condition that tok has to be in tok2utt
                    self.tok2infostat[tok] = None

            accent_path = os.path.join(
                self.swbd_dir, 'accent',
                '.'.join([conversation, speaker, 'accents', 'xml']))
            if os.path.exists(accent_path):
                accent_file = open(accent_path, 'r')
                accent_contents = accent_file.read()
                accent_soup = BeautifulSoup(accent_contents, 'lxml')
                accents = accent_soup.find_all('accent')
                for accent in accents:
                    pw_num = SwbdPreprocessor.extract_id_from_href(
                        accent.find_all('nite:pointer')[0]['href'])
                    pw_id = SwbdPreprocessor.get_id(conversation, pw_num)
                    if pw_id in self.tok2utt:
                        self.tok2tone[
                            pw_id] = 1  # added condition that tok has to be in tok2utt
                for pw in self.conv2tok[conversation]:
                    if pw not in self.tok2tone:
                        if pw in self.tok2utt:
                            self.tok2tone[
                                pw] = 0  # added condition that tok has to be in tok2utt

            kontrast_path = os.path.join(
                self.swbd_dir, 'kontrast',
                '.'.join([conversation, 'kontrast', 'xml']))
            found = 0
            not_found = 0
            if os.path.exists(kontrast_path):
                kontrast_file = open(kontrast_path, 'r')
                kontrast_contents = kontrast_file.read()
                kontrast_soup = BeautifulSoup(kontrast_contents, 'lxml')
                kontrasts = kontrast_soup.find_all('kontrast')
                for kontrast in kontrasts:
                    kontrast_type = kontrast['type']
                    terms = kontrast.find_all('nite:child')
                    term_ids = [
                        '_'.join([
                            conversation,
                            SwbdPreprocessor.extract_id_from_href(term['href'])
                        ]) for term in terms
                    ]
                    for term_id in term_ids:
                        if term_id in term2pw:
                            self.tok2kontrast[term2pw[term_id]] = kontrast_type

                for tok in self.conv2tok[conversation]:
                    if conversation == 'sw2295':
                        import pdb
                        pdb.set_trace()
                    if tok not in self.tok2kontrast and tok in self.tok2utt:  # added condition that tok has to be in tok2utt
                        self.tok2kontrast[tok] = None
        print('a')

        self.utt_ids = list(self.utt2toks.keys())
        broken_toks = []
        for tok in self.tok2times:
            if self.tok2times[tok][0] == self.tok2times[tok][1]:
                broken_toks.append(tok)

        print('b')
        for utt_id in self.utt2toks:
            utt_start = self.tok2times[self.utt2toks[utt_id][0]][0]
            self.utt2tokentimes[utt_id] = [
                float(self.tok2times[tok][0]) for tok in self.utt2toks[utt_id]
            ] + [self.tok2times[self.utt2toks[utt_id][-1]][-1]]
            self.utt2startend[utt_id] = (self.utt2tokentimes[utt_id][0],
                                         self.utt2tokentimes[utt_id][-1])
            self.utt2text[utt_id] = [
                self.tok2tokstr[tok] for tok in self.utt2toks[utt_id]
            ]
            self.utt2frames[utt_id] = torch.tensor([
                int(round(float(tim - utt_start) * 100))
                for tim in self.utt2tokentimes[utt_id]
            ],
                                                   dtype=torch.float32)
        print('c')
        self.make_BIO()
        print('d')
        self.make_new_tags()
        print('e')
        self.make_old_tags()
        print('f')
        self.make_kontrast_tags()
        print('g')
Example 37
def Ace2json(entity_dir, rel_dir, token_dict_offset1, token_dict_offset2,
             txtfn, docs, nercount, relcount, sentcount):
    fn = txtfn.split('/')[-1].replace('.split.txt', '')
    print(fn)
    fid = open(txtfn)
    text = fid.read()
    text = text.rstrip()
    sentences = text.split('\n')
    sentences = [line.split() for line in sentences]
    sentcount += len(sentences)
    sentence_ids = []
    i = 0
    for sentence in sentences:
        ids = []
        for word in sentence:
            ids.append(i)
            i += 1
        sentence_ids.append(ids)

    ner = [[] for i in range(len(sentences))]
    relations = [[] for i in range(len(sentences))]

    idCluster2spans = {}
    for entity in entity_dir:
        offsets = tuple(entity_dir[entity]['offset'])
        if offsets[0] in token_dict_offset1:
            offset0 = token_dict_offset1[offsets[0]]
            tokenid0 = offset0['tokenid']
        else:
            pdb.set_trace()
        if offsets[1] in token_dict_offset2:
            offset1 = token_dict_offset2[offsets[1]]
            tokenid1 = offset1['tokenid']
        else:
            pdb.set_trace()
        offsets = (tokenid0, tokenid1)
        cluster = entity_dir[entity]['cluster']
        if not cluster in idCluster2spans:
            idCluster2spans[cluster] = []
        idCluster2spans[cluster].append(offsets)

    clusters = []
    for cl in idCluster2spans:
        if len(idCluster2spans[cl]) < 2:
            continue
        listSpan = idCluster2spans[cl]
        chain = []
        for l in listSpan:
            spans = []
            spans.append(l[0])
            spans.append(l[1])
            chain.append(spans)
        clusters.append(chain)

    for entity in entity_dir:
        offsets = tuple(entity_dir[entity]['offset'])
        if offsets[0] in token_dict_offset1:
            offset0 = token_dict_offset1[offsets[0]]
            tokenid0 = offset0['tokenid']
        else:
            pdb.set_trace()
        if offsets[1] in token_dict_offset2:
            offset1 = token_dict_offset2[offsets[1]]
            tokenid1 = offset1['tokenid']
        else:
            pdb.set_trace()
        ner[offset0['sentid']].append(
            [tokenid0, tokenid1, entity_dir[entity]['ner']])
        nercount += 1
    for relation in rel_dir:
        arg1 = rel_dir[relation]['arg1']
        arg2 = rel_dir[relation]['arg2']
        tokid0 = token_dict_offset1[entity_dir[arg1]['offset'][0]]['tokenid']
        tokid1 = token_dict_offset2[entity_dir[arg1]['offset'][1]]['tokenid']
        tokid2 = token_dict_offset1[entity_dir[arg2]['offset'][0]]['tokenid']
        tokid3 = token_dict_offset2[entity_dir[arg2]['offset'][1]]['tokenid']
        relations[token_dict_offset1[
            entity_dir[arg1]['offset'][0]]['sentid']].append([
                tokid0, tokid1, tokid2, tokid3, rel_dir[relation]['relation']
            ])
        relcount += 1
    docs.append({
        "sentences": sentences,
        "ner": ner,
        "relations": relations,
        "clusters": clusters,
        "doc_key": fn
    })
    return nercount, relcount, sentcount
Example 38
def xml_param_df_cols(mission_file_name):
    # Borrowing this xml parsing and looping snippet from
    # scrimmage/scripts/generate_scenarios.py (GPL3 license)

    tree = ET.parse(mission_file_name)
    root = tree.getroot()

    team_keys = []
    list_df_by_team = []
    big_df_params = pd.DataFrame()

    # there is probably a nicer way to do this, but this gets the right type
    notatag = tree.find('this_isnt_a_tag')
    nonetype = type(notatag)

    # Find and loop over all "entity" tags in mission file.
    entity_num = 0
    key_idx = -1
    for child in root:
        # This is still a work in progress.
        # Want to get all of the parameters from the various plugins,
        # add the tag name, plugin name, and team number (where applicable) to
        # a Pandas DF column name, and then set the value for that column to be
        # what the param was during simulation as defined by the mission file.
        # if child.tag != 'entity_common' and child.tag != 'entity':
        #     continue

        if child.tag == "entity_common" or child.tag == "entity":
            ent_common_type = None
            if child.tag == 'entity_common':
                ent_common_type = child.attrib['name']
            elif child.tag == 'entity':
                try:
                    ent_common_type = child.attrib['entity_common']
                except KeyError:
                    ent_common_type = None

            theteam = '0'
            theteamtag = child.find('team_id')
            if type(theteamtag) != nonetype:
                # we have a team_id tag
                theteam = theteamtag.text

            teamint = int(theteam)

            entdf = pd.DataFrame()
            toappend = ''
            if (teamint != 0):
                if (theteam in team_keys):
                    team_keys.append(theteam + 'a')
                else:
                    team_keys.append(theteam)
                key_idx += 1
                toappend += '_t_' + team_keys[key_idx]

            if ent_common_type is not None:
                try:
                    toappend += '_' + ent_common_type
                except:
                    pdb.set_trace()

            aut_block = child.find('autonomy')
            motion_block = child.find('motion_model')
            controller_block = child.find('controller')
            sensor_block = child.find('sensor')

            autdf = append_block(aut_block, 'autonomy', nonetype)
            if (entdf.empty):
                entdf = autdf
            else:
                entdf = entdf.join(autdf, sort=False)

            motiondf = append_block(motion_block, 'motion_model', nonetype)
            if (entdf.empty):
                entdf = motiondf
            else:
                entdf = entdf.join(motiondf, sort=False)

            controllerdf = append_block(controller_block, 'controller',
                                        nonetype)
            if (entdf.empty):
                entdf = controllerdf
            else:
                entdf = entdf.join(controllerdf, sort=False)

            sensordf = append_block(sensor_block, 'sensor', nonetype)
            if (entdf.empty):
                entdf = sensordf
            else:
                entdf = entdf.join(sensordf, sort=False)

            if (not entdf.empty):
                collist = entdf.columns
                colmap = {}
                for col in collist:
                    colmap[col] = col + toappend
                entdf.rename(index=str, columns=colmap, inplace=True)

            if (big_df_params.empty):
                big_df_params = entdf
            else:
                big_df_params = big_df_params.join(entdf, sort=False)

    # Return a df of all modified attributes of all teams, with team number
    # suffixes and/or entity_common attributes in column names
    if big_df_params.empty:
        pdb.set_trace()
    return big_df_params
Example 39
def apply_magic_wand(A,
                     gSig,
                     dims,
                     A_thr=None,
                     coms=None,
                     dview=None,
                     min_frac=0.7,
                     max_frac=1.0,
                     roughness=2,
                     zoom_factor=1,
                     center_range=2) -> np.ndarray:
    """ Apply cell magic Wand to results of CNMF to ease matching with labels

    Args:
        A:
            output of CNMF
    
        gSig: tuple
            input of CNMF (half neuron size)
    
        A_thr:
            thresholded version of A
    
        coms:
            centers of the magic wand
    
        dview:
            for parallelization
    
        min_frac:
            fraction of minimum of gSig to take as minimum size
    
        max_frac:
            multiplier of maximum of gSig to take as maximum size

    Returns:
        masks: ndarray
            binary masks
    """

    if (A_thr is None) and (coms is None):
        import pdb
        pdb.set_trace()
        A_thr = threshold_components(A.tocsc()[:],
                                     dims,
                                     medw=None,
                                     thr_method='max',
                                     maxthr=0.2,
                                     nrgthr=0.99,
                                     extract_cc=True,
                                     se=None,
                                     ss=None,
                                     dview=dview) > 0

        coms = [
            scipy.ndimage.center_of_mass(mm.reshape(dims, order='F'))
            for mm in A_thr.T
        ]

    if coms is None:
        coms = [
            scipy.ndimage.center_of_mass(mm.reshape(dims, order='F'))
            for mm in A_thr.T
        ]

    min_radius = np.round(np.min(gSig) * min_frac).astype(int)
    max_radius = np.round(max_frac * np.max(gSig)).astype(int)

    params = []
    for idx in range(A.shape[-1]):
        params.append([
            A.tocsc()[:, idx].toarray().reshape(dims, order='F'), coms[idx],
            min_radius, max_radius, roughness, zoom_factor, center_range
        ])

    logging.debug(len(params))

    if dview is not None:
        masks = np.array(list(dview.map(cell_magic_wand_wrapper, params)))
    else:
        masks = np.array(list(map(cell_magic_wand_wrapper, params)))

    return masks
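A hypothetical usage sketch (shapes and values are illustrative, not from the original): A is the sparse spatial-component matrix from CNMF, gSig the half neuron size it was run with, dims the movie frame shape.

masks = apply_magic_wand(A, gSig=(4, 4), dims=(256, 256))
# masks: (n_components, 256, 256) boolean array, one mask per component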
Esempio n. 40
0
def markets():
    '''
    High-level: Info about a given Market
    Endpoint: api/v1/markets
    Provides:
        Lat/Long
        Market ID
        Market Name
        Products sold at market
            Product Avg Price
            Product Price Normalized to the rest of the state
    '''

    import pdb
    pdb.set_trace()

    with ENGINE.connect() as conn:
        r = conn.execute('''
            SELECT id, name, lat, long
            FROM location_xref;
            ''')
        products = conn.execute('''
            SELECT s.location_xref_id AS market, p.name AS product, AVG(s.price) as mean
            FROM sales s
            JOIN product_xref p
            ON s.product_xref_id=p.id
            GROUP BY market, product
            ORDER BY market;
            ''')
        state = conn.execute('''
            SELECT p.name AS product, s.price
            FROM sales s
            JOIN product_xref p
            ON s.product_xref_id=p.id;
            ''')

    #create list of dictionaries for market locations
    cols = ['id', 'name', 'lat', 'long']
    market_locations = [dict(zip(cols, market)) for market in r.fetchall()]  # izip in the Python 2 original

    #create normalizer class objects for each product
    df = pd.DataFrame([_ for _ in state.fetchall()],
                      columns=['product', 'price'])
    normalize_dct = dict()
    for product, data in df.groupby('product'):
        normalizer_obj = Normalizer(product, data.copy())
        normalize_dct[product] = normalizer_obj

    #create dictionary products at each market
    products_dct = defaultdict(list)
    for market_id, product, price in products.fetchall():
        products_dct[int(market_id)].append({
            'name':
            product,
            'avg_price':
            round(float(price), 2),
            'color_scale':
            normalize_dct[product].transform(float(price))
        })

    #combine all query data to single json object.
    #add products at each market to market summary
    market_summaries = []
    for market_dct in market_locations:
        market_dct['products'] = products_dct[market_dct['id']]
        market_summaries.append(market_dct)
    return jsonify(market_summaries)
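Normalizer is referenced but never defined in this snippet; a minimal sketch consistent with how it is used here (fit on one product's statewide prices, transform maps a price onto a 0-1 color scale):

class Normalizer:
    # Minimal sketch: min-max scale one product's prices to [0, 1].
    def __init__(self, product, data):
        self.product = product
        prices = data['price'].astype(float)
        self.lo, self.hi = prices.min(), prices.max()

    def transform(self, price):
        if self.hi == self.lo:
            return 0.5  # flat prices: park in the middle of the scale
        return round((price - self.lo) / (self.hi - self.lo), 2)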
Esempio n. 41
0
def subplot(
    plots,
    rows,
    cols,
    output_width,
    output_height,
    border=BORDER,
    titles=None,
    normalize=None,
    order=None,
    fancy_text=False,
):
    """
    Given a list of images, returns a single image with the sub-images tiled in a subplot.

    :param plots: array of numpy array images to plot. Can be of different sizes and dimensions as
        long as they are 2 or 3 dimensional.
    :param rows: int number of rows in subplot. If there are fewer images than rows, it will add
        empty space for the blanks. If there are fewer rows than images, it will not draw the
        remaining images.
    :param cols: int number of columns in subplot. Similar to rows.
    :param output_width: int width in pixels of a single subplot output image.
    :param output_height: int height in pixels of a single subplot output image.
    :param border: int amount of border padding pixels between each image.
    :param titles: titles for each subplot to be rendered on top of images.
    :param normalize: list of whether to subtract the max and divide by the min before colorizing.
        If none, assumed false for all images.
    :param order: if provided this reorders the provided plots before drawing them.
    :param fancy_text: if true, uses a fancier font than CV_FONT, but takes longer to render.
    :return: A single image containing the provided images (up to rows * cols).
    """
    global FANCY_FONT
    global FONT_SIZE

    if order is not None:
        plots = [plots[im_ind] for im_ind in order]
        if titles is not None:
            titles = [titles[im_ind] for im_ind in order]
        if normalize is not None:
            normalize = [normalize[im_ind] for im_ind in order]

    returned_image = np.full(
        ((output_height + 2 * border) * rows, (output_width + 2 * border) * cols, 3), 191, dtype=np.uint8
    )
    if fancy_text:
        from PIL import Image, ImageDraw, ImageFont

        if FANCY_FONT is None:
            FONT_SIZE = int(FONT_SIZE * output_width / 320.0)
            FANCY_FONT = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", FONT_SIZE)
    for row in range(rows):
        for col in range(cols):
            try:
                if col + cols * row >= len(plots):
                    return returned_image
                im = plots[col + cols * row]
                if im is None:
                    continue
                im = im.squeeze()
                if im.dtype != np.uint8 or len(im.shape) < 3:
                    if normalize is None or normalize[col + cols * row]:
                        im = im.astype(np.float32)
                        im -= np.min(im)
                        im *= 255 / max(np.max(im), 1e-10)
                        im = im.astype(np.uint8)
                    else:
                        im = im.astype(np.uint8)
                if len(im.shape) < 3:
                    im = cv2.applyColorMap(im, cv2.COLORMAP_JET)[:, :, ::-1]
                if im.shape != (output_height, output_width, 3):
                    im_width = im.shape[1] * output_height / im.shape[0]
                    if im_width > output_width:
                        im_width = output_width
                        im_height = im.shape[0] * output_width / im.shape[1]
                    else:
                        im_width = im.shape[1] * output_height / im.shape[0]
                        im_height = output_height
                    im_width = int(im_width)
                    im_height = int(im_height)
                    im = cv2.resize(im, (im_width, im_height), interpolation=cv2.INTER_NEAREST)
                    if im_width != output_width:
                        pad0 = int(np.floor((output_width - im_width) * 1.0 / 2))
                        pad1 = int(np.ceil((output_width - im_width) * 1.0 / 2))
                        im = np.lib.pad(im, ((0, 0), (pad0, pad1), (0, 0)), "constant", constant_values=0)
                    elif im_height != output_height:
                        pad0 = int(np.floor((output_height - im_height) * 1.0 / 2))
                        pad1 = int(np.ceil((output_height - im_height) * 1.0 / 2))
                        im = np.lib.pad(im, ((pad0, pad1), (0, 0), (0, 0)), "constant", constant_values=0)
                if (
                    titles is not None
                    and len(titles) > 1
                    and len(titles) > col + cols * row
                    and len(titles[col + cols * row]) > 0
                ):
                    if fancy_text:
                        if im.dtype != np.uint8:
                            im = im.astype(np.uint8)
                        im = Image.fromarray(im)
                        draw = ImageDraw.Draw(im)
                        if isinstance(titles[col + cols * row], str):
                            for x in range(6, 9):
                                for y in range(3, 6):
                                    draw.text((x, y), titles[col + cols * row], (0, 0, 0), font=FANCY_FONT)
                            draw.text((7, 4), titles[col + cols * row], (255, 255, 255), font=FANCY_FONT)
                        else:
                            for tt, title in enumerate(titles[col + cols * row]):
                                for x in range(6, 9):
                                    for y in range(3, 6):
                                        draw.text((x, y + tt * (FONT_SIZE + 5)), title, (0, 0, 0), font=FANCY_FONT)
                                draw.text((7, 4 + tt * (FONT_SIZE + 5)), title, (255, 255, 255), font=FANCY_FONT)
                        im = np.asarray(im)
                    else:
                        scale_factor = im.shape[1] / 320.0
                        if isinstance(titles[col + cols * row], str):
                            im = cv2.putText(
                                im.copy(),
                                titles[col + cols * row],
                                (30, int(30 * scale_factor)),
                                CV_FONT,
                                0.5 * scale_factor,
                                [0, 0, 0],
                                4,
                            )
                            im = cv2.putText(
                                im.copy(),
                                titles[col + cols * row],
                                (30, int(30 * scale_factor)),
                                CV_FONT,
                                0.5 * scale_factor,
                                [255, 255, 255],
                                1,
                            )
                        else:
                            for tt, title in enumerate(titles[col + cols * row]):
                                im = cv2.putText(
                                    im.copy(),
                                    title,
                                    (30, int((tt + 1) * 30 * scale_factor)),
                                    CV_FONT,
                                    0.5 * scale_factor,
                                    [0, 0, 0],
                                    4,
                                )
                                im = cv2.putText(
                                    im.copy(),
                                    title,
                                    (30, int((tt + 1) * 30 * scale_factor)),
                                    CV_FONT,
                                    0.5 * scale_factor,
                                    [255, 255, 255],
                                    1,
                                )
                returned_image[
                    border + (output_height + border) * row : (output_height + border) * (row + 1),
                    border + (output_width + border) * col : (output_width + border) * (col + 1),
                    :,
                ] = im
            except Exception as ex:
                print(ex)
                import traceback

                traceback.print_exc()
                print("Failed for image", col + cols * row)
                print("shape", plots[col + cols * row].shape)
                print("type", plots[col + cols * row].dtype)
                if titles is not None and len(titles) > col + cols * row:
                    print("title", titles[col + cols * row])
                import pdb

                pdb.set_trace()
                print("bad")
                raise ex

    im = returned_image
    # for one long title
    if titles is not None and len(titles) == 1:
        if fancy_text:
            if im.dtype != np.uint8:
                im = im.astype(np.uint8)
            im = Image.fromarray(im)
            draw = ImageDraw.Draw(im)
            for x in range(9, 12):
                for y in range(9, 12):
                    draw.text((x, y), titles[0], (0, 0, 0), font=FANCY_FONT)
            draw.text((10, 10), titles[0], (255, 255, 255), font=FANCY_FONT)
            im = np.asarray(im)
        else:
            scale_factor = max(max(im.shape[0], im.shape[1]) * 1.0 / 300, 1)
            cv2.putText(im, titles[0], (10, 30), CV_FONT, 0.5 * scale_factor, [0, 0, 0], 4)
            cv2.putText(im, titles[0], (10, 30), CV_FONT, 0.5 * scale_factor, [255, 255, 255], 1)

    return im
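A hypothetical usage sketch (random arrays stand in for real plots):

import numpy as np
images = [np.random.rand(64, 64) for _ in range(4)]
canvas = subplot(images, rows=2, cols=2, output_width=128, output_height=128,
                 titles=['a', 'b', 'c', 'd'])
# canvas is a single uint8 image tiling the four inputs in a 2x2 grid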
Esempio n. 42
0
File: worker.py Progetto: max-k/mode
 def _enter_debugger(self) -> None:
     self.carp('Starting debugger...')
     import pdb  # noqa: T100
     pdb.set_trace()  # noqa: T100
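Since Python 3.7 the built-in breakpoint() has the same effect and honors the PYTHONBREAKPOINT environment variable; a sketch of an equivalent method:

 def _enter_debugger(self) -> None:
     self.carp('Starting debugger...')
     breakpoint()  # pdb by default; set PYTHONBREAKPOINT=0 to disable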
Esempio n. 43
0
 def __str__(self):
   pdb.set_trace()  # break whenever the object is rendered as a string
   return super().__str__()  # __str__ must return a str; the original fell off the end
Esempio n. 44
0
File: win.py Progetto: dlb666666/CTF
import pdb
import requests

cookies = {"PHPSESSID": ""}  # Fill this in.
response = requests.post("https://www.hackthis.co.uk/levels/b3.php?submit",
                         data={"score": 194175},
                         cookies=cookies)
pdb.set_trace()  # Manually look at the response
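At the breakpoint the response can be examined interactively; typical commands (standard requests attributes):

# (Pdb) p response.status_code
# (Pdb) p response.headers['Content-Type']
# (Pdb) p response.text[:200]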
Esempio n. 45
0
def print_my_secret():
    import os
    import pdb
    pdb.set_trace()
    print('sono' ' marco')
    print('my secret ' 'is: {}'.format(os.environ['MY_SECRET']))
Esempio n. 46
0
 def __str__(self):
   pdb.set_trace()  # moved above the return; it was unreachable after it
   return "SwapContent('%s','%s','hidden')" % (self.item.id,self.target.id)
Esempio n. 47
0
    def addStrokeToPath(self, path, stroke):
        """ Given a stroke from a path command
        (in the form (command, arguments)) create the path
        commands that represent it.
        
        TODO: break out into (yet another) class/module,
        especially so we can get O(1) dispatch on type?
        """
        type, arg = stroke
        relative = False
        if type == type.lower():
            relative = True
            ox, oy = path.GetCurrentPoint().Get()
        else:
            ox = oy = 0

        def normalizePoint(arg):
            x, y = arg
            return x + ox, y + oy

        def reflectPoint(point, relativeTo):
            x, y = point
            a, b = relativeTo
            return ((a * 2) - x), ((b * 2) - y)

        type = type.upper()
        if type == 'M':
            pt = normalizePoint(arg)
            self.firstPoints.append(pt)
            path.MoveToPoint(pt)
        elif type == 'L':
            path.AddLineToPoint(normalizePoint(arg))
        elif type == 'C':
            #control1, control2, endpoint = arg
            control1, control2, endpoint = map(normalizePoint, arg)
            self.lastControl = control2
            path.AddCurveToPoint(control1, control2, endpoint)
            #~ cp = path.GetCurrentPoint()
            #~ path.AddCircle(c1x, c1y, 5)
            #~ path.AddCircle(c2x, c2y, 3)
            #~ path.AddCircle(x,y, 7)
            #~ path.MoveToPoint(cp)
            #~ print "C", control1, control2, endpoint

        elif type == 'S':
            #control2, endpoint = arg
            control2, endpoint = map(normalizePoint, arg)
            if self.lastControl:
                control1 = reflectPoint(self.lastControl,
                                        path.GetCurrentPoint())
            else:
                control1 = path.GetCurrentPoint()
            #~ print "S", self.lastControl,":",control1, control2, endpoint
            self.lastControl = control2
            path.AddCurveToPoint(control1, control2, endpoint)
        elif type == "Q":
            (cx, cy), (x, y) = map(normalizePoint, arg)
            self.lastControlQ = (cx, cy)
            path.AddQuadCurveToPoint(cx, cy, x, y)
        elif type == "T":
            x, y = normalizePoint(arg)
            if self.lastControlQ:
                cx, cy = reflectPoint(self.lastControlQ,
                                      path.GetCurrentPoint())
            else:
                cx, cy = path.GetCurrentPoint()
            self.lastControlQ = (cx, cy)
            path.AddQuadCurveToPoint(cx, cy, x, y)

        elif type == "V":
            _, y = normalizePoint((0, arg))
            x, _ = path.GetCurrentPoint()
            path.AddLineToPoint(x, y)

        elif type == "H":
            x, _ = normalizePoint((arg, 0))
            _, y = path.GetCurrentPoint()
            path.AddLineToPoint(x, y)

        elif type == "A":
            #wxGC currently only supports circular arcs,
            #not elliptical ones

            (
                (rx, ry),  #radii of ellipse
                angle,  #angle of rotation on the ellipse in degrees
                (fa, fs),  #arc and stroke angle flags
                (x, y)  #endpoint on the arc
            ) = arg

            x, y = normalizePoint((x, y))
            cx, cy = path.GetCurrentPoint()
            if (cx, cy) == (x, y):
                return  #noop

            if (rx == 0 or ry == 0):
                #no radius is effectively a line
                path.AddLineToPoint(x, y)
                return

            #find the center point for the ellipse
            #translation via the angle
            angle = angle % 360
            angle = math.radians(angle)

            #translated endpoint (endpoint-to-center conversion, SVG spec F.6.5)
            xPrime = math.cos(angle) * ((cx - x) / 2)
            xPrime += math.sin(angle) * ((cy - y) / 2)
            yPrime = -(math.sin(angle)) * ((cx - x) / 2)
            yPrime += (math.cos(angle)) * ((cy - y) / 2)

            temp = ((rx**2) * (ry**2)) - ((rx**2) *
                                          (yPrime**2)) - ((ry**2) *
                                                          (xPrime**2))
            temp /= ((rx**2) * (yPrime**2)) + ((ry**2) * (xPrime**2))
            temp = abs(temp)
            try:
                temp = math.sqrt(temp)
            except ValueError:
                import pdb
                pdb.set_trace()
            cxPrime = temp * ((rx * yPrime) / ry)
            cyPrime = temp * -((ry * xPrime) / rx)
            if fa == fs:
                cxPrime, cyPrime = -cxPrime, -cyPrime

            #rotate back to recover the actual center
            cnx = math.cos(angle) * cxPrime
            cnx -= math.sin(angle) * cyPrime
            cny = math.sin(angle) * cxPrime
            cny += math.cos(angle) * cyPrime
            cnx += ((cx + x) / 2.0)
            cny += ((cy + y) / 2.0)

            #calculate the angle between the two endpoints
            lastArc = wx.Point2D(x - cnx, y - cny).GetVectorAngle()
            firstArc = wx.Point2D(cx - cnx, cy - cny).GetVectorAngle()
            lastArc = math.radians(lastArc)
            firstArc = math.radians(firstArc)

            #aargh, bugginess.
            #AddArc draws a straight line between
            #the endpoints of the arc.
            #putting it in a subpath makes the strokes come out
            #correctly, but it still only fills the slice
            #taking out the MoveToPoint fills correctly.
            path.AddEllipse(cnx - rx, cny - ry, rx * 2, ry * 2)
            path.MoveToPoint(x, y)
            #~ npath = makePath()
            #~ npath.AddEllipticalArc(cnx-rx, cny-ry, rx*2, ry*2, firstArc, lastArc, False)
            #~ npath.MoveToPoint(x,y)
            #~ path.AddPath(npath)

        elif type == 'Z':
            #~ Bugginess:
            #~ CloseSubpath() doesn't change the
            #~ current point, as SVG spec requires.
            #~ However, manually moving to the endpoint afterward opens a new subpath
            #~ and (apparently) messes with stroked but not filled paths.
            #~ This is possibly a bug in GDI+?
            #~ Manually closing the path via AddLineTo gives incorrect line join
            #~ results
            #~ Manually closing the path *and* calling CloseSubpath() appears
            #~ to give correct results on win32

            pt = self.firstPoints.pop()
            path.AddLineToPoint(pt)
            path.CloseSubpath()
Esempio n. 48
0
def add(num1, num2):
	pdb.set_trace()  # break before computing the sum
	return num1 + num2
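A sketch of the resulting session, assuming pdb is imported at module level:

# >>> add(1, 2)
# > example.py(3)add()      # hypothetical file name
# -> return num1 + num2
# (Pdb) p num1, num2
# (1, 2)
# (Pdb) c
# 3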
Esempio n. 49
0
 def test_demosssssssssss(self):
     res = self.client.get(reverse('cart'), **{'X_TOKEN':self.jwt})
     import pdb
     pdb.set_trace()
     self.assertEqual(len(res.data),1)
Esempio n. 50
0
def func4(n):
    a = n + 1
    pdb.set_trace()
    b = a - 2
    pdb.set_trace()
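At the first prompt, a is already bound; c (continue) resumes execution until the second set_trace fires:

# (Pdb) p a    # n + 1
# (Pdb) c      # runs b = a - 2, then stops at the second breakpoint
# (Pdb) p b    # a - 2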
Esempio n. 51
0
def interactive_stop(is_interactive: bool, title: str, locals: dict):
    if is_interactive:
        rprint(f"[yellow]Interactive mode: {title}")
        local_info = pprint.pformat({k: f"{type(v).__module__}.{type(v).__qualname__}" for k, v in locals.items()})
        rprint("[yellow]Locals: ", local_info)
        pdb.set_trace()
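A hypothetical call site (rprint is assumed to be rich's print; pprint and pdb imported at module level):

model = {'epochs': 10}  # illustrative object worth inspecting
interactive_stop(True, 'after training', locals())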
Esempio n. 52
0
    def convert_bmes_to_dg(self, file, maxlen=128):
        #Convert BIO-format tags back into the format dg expects
        with open(file) as f_in, open(file + '.out.txt', 'w') as f_out:
            lines = f_in.readlines()
            out_lines = []
            res = []
            words = []
            tags = []
            for idx, line in enumerate(lines):
                if line.strip() == "" and idx != 0:
                    out_lines.append(' '.join(words))
                    res.append(tags)
                    words = []
                    tags = []
                else:
                    words.append(line.strip().split('\t')[0])
                    tags.append(line.strip().split('\t')[1])

            for idx, line in enumerate(out_lines):
                char_list = line.split()
                tag_list = res[idx][:len(char_list)]
                result = []
                tmp = []
                ctype = ""
                for idy, char in enumerate(char_list):
                    if idy == len(char_list) - 1:
                        last_ctype = tag_list[idy][2] if tag_list[idy] != 'O' else 'o'
                        if last_ctype == ctype:
                            tmp.append(char)
                            result.append('_'.join(tmp) + '/' + ctype)
                        else:
                            if tmp != []:
                                result.append('_'.join(tmp) + '/' + ctype)
                            result.append(char + '/' + last_ctype)

                    if tag_list[idy].startswith("E-") \
                            or tag_list[idy].startswith("S-"):
                        tmp.append(char)
                        if ctype == "": ctype = tag_list[idy][2]
                        result.append('_'.join(tmp) + '/' + ctype)
                        tmp = []
                        ctype = ""
                    elif tag_list[idy].startswith("M-"):
                        tmp.append(char)
                        ctype = tag_list[idy][2]
                    elif tag_list[idy].startswith("B-"):
                        if len(tmp) >= 1:
                            if ctype == "": ctype = tag_list[idy - 1][2]
                            result.append('_'.join(tmp) + '/' + ctype)
                            tmp = []
                            ctype = ""
                        tmp.append(char)
                        ctype = tag_list[idy][2]
                    elif tag_list[idy].startswith("O"):
                        if len(tmp) >= 1 and ctype != "o":
                            if ctype == "": ctype = tag_list[idy - 1][2]
                            result.append('_'.join(tmp) + '/' + ctype)
                            tmp = []
                            ctype = ""
                        ctype = 'o'
                        tmp.append(char)
                    else:
                        print("error occured in line %s" % idx)
                        pdb.set_trace()
                f_out.write('  '.join(result) + '\n')
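A sketch of the transformation, assuming tab-separated char/tag lines and BMES tags whose single-letter type sits at index 2 (e.g. B-LOC):

# input file (char\ttag):       output line in file + '.out.txt':
# 北\tB-LOC
# 京\tE-LOC
# 欢\tO                    ->   北_京/L  欢_迎_你/o
# 迎\tO
# 你\tO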
Esempio n. 53
0
    def __call__(self, state, nu, ddY, h0, zdd, U0, rdot):
        fail_flag = state.fail_id
        phi = state.att[0]
        theta = state.att[1]
        chi = self.chi
        if state.fail_id == 0 or state.fail_id == 2:
            chi = np.pi - self.chi

        h1 = h0[0]
        h2 = h0[1]
        h3 = h0[2]

        Gp = np.array([
            self.k * self.b * sin(self.beta), 
            -self.k * self.b * sin(self.beta), 
            -self.k * self.b * sin(self.beta), 
            self.k * self.b * sin(self.beta)
        ])/ self.ix

        Gq = np.array([
            self.k * self.b * np.cos(self.beta), 
            self.k * self.b * np.cos(self.beta), 
            -self.k * self.b * np.cos(self.beta), 
            -self.k * self.b * np.cos(self.beta)
        ]) / self.iy

        Gr = np.array([self.t, -self.t, self.t, -self.t]) / self.iz
        
        G0 = np.array([
            -(self.k/self.mass) * cos(theta) * cos(phi) * np.ones(4), 
            -h3 * Gq + h2 * Gr,
            h3 * Gp - h1 * Gr,
            Gr
        ])

        R = block_diag(1, np.array([[cos(chi), sin(chi)], [-sin(chi), cos(chi)]]), 1)
        ddy0 = np.vstack([zdd, ddY, rdot])
        G = np.matmul(R, G0)

        if self.DRF_enable and fail_flag >= 0:
            if fail_flag == 0 or fail_flag == 2:
                fail_id = [0, 2]
            elif fail_flag == 1 or fail_flag == 3:
                fail_id = [1, 3]
            else:
                raise NotImplementedError("")
        else:
            fail_id = fail_flag
        
        if fail_flag >= 0:
            if self.DRF_enable == 1:
                G[:, fail_id] = np.zeros((4, len(fail_id)))
                ddy0[2:,:] = np.zeros((2, 1))
                G[2:, :] = np.zeros_like(G[2:, :])
                nu[2:, :] = np.zeros((2, 1)) 
            else:
                G[:, fail_id] = np.zeros((4,))
                ddy0[3, :] = 0  # assuming an n x 1 vector
                G[3, :] = np.zeros_like(G[3, :])
                nu[3, :] = 0  # assuming an n x 1 vector
        try:
            dU = np.matmul(np.linalg.pinv(G), nu - ddy0)
        except np.linalg.LinAlgError:
            import pdb; pdb.set_trace()  # pseudo-inverse failed; inspect G interactively
            x = 21  # dummy statement so the debugger has a line to stop on

        if (self._check_nan(dU)):
            import pdb; pdb.set_trace()  # NaNs in the control increment; inspect nu, ddy0, G
            x = 22  # dummy statement so the debugger has a line to stop on

        Y = (nu - ddy0)
        U = U0 + dU
        #if fail_flag > 0:
        #    U[fail_id] = 0.0
        return U, Y, dU
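The heart of the allocator is the least-squares solve above: the pseudo-inverse maps the virtual-control error nu - ddy0 onto actuator increments. A minimal standalone sketch of that step:

import numpy as np

def allocate(G, nu, ddy0):
    # dU minimizes ||G @ dU - (nu - ddy0)|| in the least-squares sense
    return np.linalg.pinv(G) @ (nu - ddy0)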
a = "hello"
print(a)
import pdb; pdb.set_trace()


# !import code; code.interact(local=vars())

# c = continue
# use print(var) for names that clash with pdb commands (e.g. single-letter variables)
Esempio n. 55
0
    def main(self):
        """Main function. This should not be overridden by the subclass test scripts."""

        parser = optparse.OptionParser(usage="%prog [options]")
        parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                          help="Leave obsrds and test.* datadir on exit or error")
        parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
                          help="Don't stop obsrds after the test execution")
        parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
                          help="Source directory containing obsrd/obsr-cli (default: %default)")
        parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
                          help="Directory for caching pregenerated datadirs")
        parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
        parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
                          help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
        parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                          help="Print out all RPC calls as they are made")
        parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
                          help="The seed to use for assigning port numbers (default: current process id)")
        parser.add_option("--coveragedir", dest="coveragedir",
                          help="Write tested RPC commands into this directory")
        parser.add_option("--configfile", dest="configfile",
                          help="Location of the test framework config file")
        parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
                          help="Attach a python debugger if test fails")
        parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
                          help="use bitcoin-cli instead of RPC for all commands")
        self.add_options(parser)
        (self.options, self.args) = parser.parse_args()

        PortSeed.n = self.options.port_seed

        os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']

        check_json_precision()

        self.options.cachedir = os.path.abspath(self.options.cachedir)

        # Set up temp directory and start logging
        if self.options.tmpdir:
            self.options.tmpdir = os.path.abspath(self.options.tmpdir)
            os.makedirs(self.options.tmpdir, exist_ok=False)
        else:
            self.options.tmpdir = tempfile.mkdtemp(prefix="test")
        self._start_logging()

        success = TestStatus.FAILED

        try:
            if self.options.usecli and not self.supports_cli:
                raise SkipTest("--usecli specified but test does not support using CLI")
            self.setup_chain()
            self.setup_network()
            time.sleep(5)
            self.run_test()
            success = TestStatus.PASSED
        except JSONRPCException as e:
            self.log.exception("JSONRPC error")
        except SkipTest as e:
            self.log.warning("Test Skipped: %s" % e.message)
            success = TestStatus.SKIPPED
        except AssertionError as e:
            self.log.exception("Assertion failed")
        except KeyError as e:
            self.log.exception("Key error")
        except Exception as e:
            self.log.exception("Unexpected exception caught during testing")
        except KeyboardInterrupt as e:
            self.log.warning("Exiting after keyboard interrupt")

        if success == TestStatus.FAILED and self.options.pdbonfailure:
            print("Testcase failed. Attaching python debugger. Enter ? for help")
            pdb.set_trace()

        if not self.options.noshutdown:
            self.log.info("Stopping nodes")
            if self.nodes:
                self.stop_nodes()
        else:
            for node in self.nodes:
                node.cleanup_on_exit = False
            self.log.info("Note: obsrds were not stopped and may still be running")

        if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
            self.log.info("Cleaning up")
            shutil.rmtree(self.options.tmpdir)
        else:
            self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)

        if success == TestStatus.PASSED:
            self.log.info("Tests successful")
            exit_code = TEST_EXIT_PASSED
        elif success == TestStatus.SKIPPED:
            self.log.info("Test skipped")
            exit_code = TEST_EXIT_SKIPPED
        else:
            self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
            self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
            exit_code = TEST_EXIT_FAILED
        logging.shutdown()
        sys.exit(exit_code)
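The --pdbonfailure handling above is a common pattern; a minimal standalone sketch of the same idea, using pdb.post_mortem to land in the frame that raised:

import pdb
import traceback

def run_with_debugger(test_fn, debug_on_failure=True):
    try:
        test_fn()
    except Exception:
        traceback.print_exc()
        if debug_on_failure:
            pdb.post_mortem()  # inspect the raising frame interactively
        raise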
Esempio n. 56
0
    def get_supported_coins(cls):
        db, cursor = cls.db_connect()
        sql = "SELECT coin_ticker, coin_name, sector_ticker FROM coinindexcap.coins"
        cursor.execute(sql)
        df = pd.DataFrame(list(cursor.fetchall()),
                          columns=["ticker", "coin_name", "sector"])
        return df

    @classmethod
    def get_minutely_coin_data(cls, ticker, start=None):
        db, cursor = cls.db_connect()
        sql = """SELECT TimeStampID, Price_USD, Price_BTC, MarketCap_USD,
			Volume24hr_USD FROM	coinindexcap.minutely_data WHERE Ticker = '{}'""".format(
            ticker)
        if start: sql += " WHERE TimeStampID > '{start}'".format(start=start)
        cursor.execute(sql)
        df = pd.DataFrame(list(cursor.fetchall()),
                          columns=[
                              "TimeStampID", "Price_USD", "Price_BTC",
                              "MarketCap_USD", "Volume24hr_USD"
                          ])
        return df


if __name__ == "__main__":

    coins = BigOlDB.get_minutely_coin_data('ETH')

    import pdb
    pdb.set_trace()  # breakpoint 6a54cd68 //
Esempio n. 57
0
import subprocess as sub

def pep(url1):
    file_name = url1.split('/')[-1]
    import pdb; pdb.set_trace()
    cmd = 'pep8 '+file_name
    out2 = sub.Popen(cmd, stderr=sub.PIPE, stdout=sub.PIPE, shell=True)
    return out2.communicate()
Esempio n. 58
0
def beautify(legend=False, rightlegend=False):
    """Customize figure format.

    adding a legend, axis label, etc

    :param bool legend: if True, display a box legend
    :param bool rightlegend: if True, makes some space on the right for
                             legend

    """
    # Get axis handle and set scale for each axis
    axisHandle = plt.gca()
    axisHandle.set_xscale("log")
    try:
        axisHandle.set_yscale("log")
    except OverflowError:
        set_trace()

    # Grid options
    axisHandle.yaxis.grid(True)

    ymin, ymax = plt.ylim()

    # quadratic slanted "grid"
    if 1 < 3:
        for i in range(-2, 7, 1 if ymax < 1e5 else 2):
            plt.plot((0.2, 20000), (10**i, 10**(i + 5)), 'k:',
                     linewidth=0.5)  # grid should be on top
    else:  # to be removed
        plt.plot((2,200), (1, 1e2), 'k:', zorder=-1)  # -1 -> plotted below?
        # plt.plot((2,200), (1, 1e4), 'k:', zorder=-1)
        plt.plot((2,200), (1e3, 1e5), 'k:', zorder=-1)
        # plt.plot((2,200), (1e3, 1e7), 'k:', zorder=-1)
        plt.plot((2,200), (1e6, 1e8), 'k:', zorder=-1)
        # plt.plot((2,200), (1e6, 1e10), 'k:', zorder=-1)

    plt.ylim(10**-0.2, ymax) # Set back the default maximum.

    # ticks on axes
    #axisHandle.invert_xaxis()
    dimticklist = testbedsettings.current_testbed.dimensions_to_display
    dimannlist = testbedsettings.current_testbed.dimensions_to_display
    # TODO: All these should depend on (xlim, ylim)

    axisHandle.set_xticks(dimticklist)
    axisHandle.set_xticklabels([str(n) for n in dimannlist])

    dim_min_margin = testbedsettings.current_testbed.dimensions_to_display[0] * 0.9
    dim_max_margin = testbedsettings.current_testbed.dimensions_to_display[-1] * 1.125

    if rightlegend:
        plt.xlim(  dim_min_margin,  10 ** (numpy.log10(dim_max_margin / dim_min_margin)*1.25) * dim_min_margin)
    else:
        plt.xlim(dim_min_margin, dim_max_margin)

    tick_locs = [n for n in axisHandle.get_yticks()
                 if n > plt.ylim()[0] and n < plt.ylim()[1]]
    tick_labels = ['%d' % round(numpy.log10(n)) if n < 1e10  # assure 1 digit for uniform figure sizes
                   else '' for n in tick_locs]
    axisHandle.set_yticks(tick_locs)
    axisHandle.set_yticklabels(tick_labels)

    if legend:
        toolsdivers.legend(loc=0, numpoints=1,
                           fontsize=fontsize * legend_fontsize_scaler())
Esempio n. 59
0
 def _plot_cm(self):
     cm = ConfusionMatrix(self.y_test, self.y_test_pred)  # y_test_pred was undefined here; assuming the predictions are stored on self
     cm.plot()
     plt.show()
     LOGGER.info(f'confusion matrix: {cm} ')
     pdb.set_trace()
Esempio n. 60
0
 def on_signal(*args):
     import pdb
     pdb.set_trace()
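A typical registration for this kind of handler, letting a running process be dropped into pdb on demand (POSIX only; handlers receive the standard (signum, frame) pair, which *args absorbs):

import signal
signal.signal(signal.SIGUSR1, on_signal)  # then: kill -USR1 <pid>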