Example #1
    def install_server(self):
        self.mod_files.append(self.minecraft_server_jar())

        for f in self.mod_files:
            if not f['required_on_server']:
                # skip - note that currently some files (e.g. authlib-1.5.13.jar) have no install path, so they cannot be handled.
                continue

            try:
                f.validate_attributes()
            except AssertionError:
                print("INSTALLATION FAILED - MOD FILE DEFINITION INVALID.")
                import pprint

                pprint.pprint(f)
                return 'FAILURE'

            print('-')

            mkdir(self['download_cache_folder'])
            os.chdir(self['download_cache_folder'])
            f.download("server")

            mkdir(self['install_folder'])
            os.chdir(self['install_folder'])
            f.install(self, "server")

        print('-\r\nWriting eula.txt')
        os.chdir(self['install_folder'])
        with open('eula.txt', 'w') as eula:
            eula.write("eula=true")
Example #2
def test():
    user_agent = "NowKithMikeTyson 0.0 by /u/redleader6432"
    r = praw.Reddit(user_agent=user_agent)

    user_name = "redleader6432"
    user = r.get_redditor(user_name)

    thing_limit = 10

    gen = user.get_submitted(limit=thing_limit)
    com = user.get_comments(limit=thing_limit)

    karma_by_subreddit = {}
    comments = {}

    for thing in gen:
        subreddit = thing.subreddit.display_name
        karma_by_subreddit[subreddit] = (karma_by_subreddit.get(subreddit, 0) + thing.score)

    for thing in com:
        subreddit = thing.subreddit.display_name
        comments[subreddit] = (comments.get(subreddit, 0) + thing.score)

    print('Submitted:')
    pprint.pprint(karma_by_subreddit)

    print('Comments:')
    pprint.pprint(comments)
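Aside: the running tally above is the classic dict.get(key, 0) accumulator. A minimal equivalent sketch with collections.Counter from the standard library, using made-up (subreddit, score) pairs rather than live PRAW objects:

import pprint
from collections import Counter

# illustrative stand-ins for the PRAW listings above
things = [("python", 10), ("python", 5), ("learnpython", 3)]

karma = Counter()
for subreddit, score in things:
    karma[subreddit] += score

pprint.pprint(dict(karma))  # {'learnpython': 3, 'python': 15}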
Example #3
    def test_user_queues(self):
        queues = self.user.get_queues(expand="@title", sort_order="alphabetical", start_index=0, max_results=10)
        self.assertIsNotNone(queues)
        dump_object(queues)

        queues_instant = self.user.get_queues_instant(sort_order="alphabetical", start_index=0, max_results=2)
        self.assertIsNotNone(queues_instant)
        pprint.pprint(queues_instant)
        for queue in queues_instant['queue']:
            q = self.user.get_resource(queue['id'], data={})
            print(queue['id'])
            print(q.content)

        try:
            queues_disc = self.user.get_queues_disc(sort_order="alphabetical", start_index=0, max_results=10)
            self.assertIsNotNone(queues_disc)
            dump_object(queues_disc)
        except NetflixError as e:
            dump_object(e)


        queues_ia = self.user.get_queues_instant_available(sort_order="alphabetical", start_index=0, max_results=10)
        self.assertIsNotNone(queues_ia)
        dump_object(queues_ia)

        queues_is = self.user.get_queues_instant_saved(sort_order="alphabetical", start_index=0, max_results=10)
        self.assertIsNotNone(queues_is)
        dump_object(queues_is)
Example #4
def runTest():
    print("Test script:")
    import pprint
    print("Component tree of all installed components:")
    pprint.pprint(getAllComponents_tree())
    print("")
    test_cfg = RHNOptions(sys.argv[1])
    #    test_cfg = RHNOptions('server.app')
    #    test_cfg = RHNOptions('proxy.broker')
    #    test_cfg = RHNOptions('proxy.redirect', _CONFIG_ROOT)
    #    test_cfg = RHNOptions('proxy.redirect', '/tmp')
    #    test_cfg.filename = 'empty.conf'
    test_cfg.parse()
    print("=============== the object's repr ================================")
    print(test_cfg)
    print("=============== the object's defaults ============================")
    pprint.pprint(test_cfg.getDefaults())
    print("=============== an erroneous lookup example ======================")
    print("testing __getattr__")
    try:
        print(test_cfg.lkasjdfxxxxxxxxxxxxxx)
    except AttributeError:
        e = sys.exc_info()[1]
        print('Testing: "AttributeError: %s"' % e)
    print("")
    print(
        "=============== the object's merged settings ======================")
    test_cfg.show()
    print(
        "=============== dump of all relevant dictionaries =================")
    test_cfg.showall()
    print(
        "===================================================================")
Example #5
def RunDispatch(txn,
                balances={},
                commodities={},
                raise_error=False,
                check_not_empty=False):
    """Dispatch a transaction to its handler."""
    key = (txn['type'], txn['description'])
    try:
        handler = _DISPATCH[key]
    except KeyError:
        if raise_error:
            pprint.pprint(txn)
            raise ValueError("Unknown message: {}".format(repr(key)))
        logging.error("Ignoring message for: %s", repr(key))
    else:
        # Call the handler method.
        signature = inspect.signature(handler)
        kwargs = dict()
        if 'balances' in signature.parameters:
            kwargs['balances'] = balances
        if 'commodities' in signature.parameters:
            kwargs['commodities'] = commodities
        result = handler(txn, **kwargs)

        if result and not isinstance(result, list):
            result = [result]
        if check_not_empty:
            empty_txn = PruneEmptyValues(txn)
            assert not empty_txn, pformat(empty_txn)
        return result
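The interesting trick in RunDispatch is forwarding balances/commodities only to handlers that declare them. A self-contained sketch of that signature-filtering pattern, with invented handlers:

import inspect

def handler_a(txn, balances):
    return ("a", txn, balances)

def handler_b(txn):
    return ("b", txn)

def dispatch(handler, txn, **available):
    # pass through only the keyword arguments the handler actually declares
    params = inspect.signature(handler).parameters
    kwargs = {k: v for k, v in available.items() if k in params}
    return handler(txn, **kwargs)

print(dispatch(handler_a, {"type": "t"}, balances={}, commodities={}))
print(dispatch(handler_b, {"type": "t"}, balances={}, commodities={}))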
Example #6
    def printJSONCursor(self, fieldnames, datemap, time_format=None):
        """
        Output plain JSON objects from the cursor.

        :param fieldnames: fieldnames to include in output
        :param datemap: fieldnames to map dates to date strings
        :param time_format: field names to map to a specific time format
        :return: count of documents written
        """

        count = 0

        with self._smart_open(self._filename) as output:
            for i in self._cursor:
                # print( "processing: %s" % i )
                # print( "fieldnames: %s" % fieldnames )
                self._results.append(i)
                d = CursorFormatter.fieldMapper(i, fieldnames)
                # print( "processing fieldmapper: %s" % d )
                d = CursorFormatter.dateMapper(d, datemap, time_format)
                pprint.pprint(d, output)
                count = count + 1

        return count
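CursorFormatter.fieldMapper and CursorFormatter.dateMapper are defined elsewhere; the sketch below reimplements the same project-then-stringify-dates pipeline on plain dicts (all data invented) so the flow can be run without MongoDB:

import datetime
import io
import pprint

docs = [  # stand-ins for cursor documents
    {"name": "a", "ts": datetime.datetime(2020, 1, 1), "extra": 1},
    {"name": "b", "ts": datetime.datetime(2020, 6, 1), "extra": 2},
]

def field_mapper(doc, fieldnames):
    # keep only the requested fields
    return {k: v for k, v in doc.items() if k in fieldnames}

def date_mapper(doc, datemap, time_format="%Y-%m-%d"):
    # render the named datetime fields as strings
    return {k: (v.strftime(time_format) if k in datemap else v)
            for k, v in doc.items()}

count = 0
output = io.StringIO()
for doc in docs:
    d = date_mapper(field_mapper(doc, ["name", "ts"]), ["ts"])
    pprint.pprint(d, output)
    count += 1
print(output.getvalue(), end="")  # one pretty-printed dict per document
print(count)  # 2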
Example #8
def evaluate(args):

  opt = vars(args)
  
  # make other options
  opt['dataset_splitBy'] = opt['dataset'] + '_' + opt['splitBy']
  
  if args.cfg_file is not None:
    cfg_from_file(args.cfg_file)
  if args.set_cfgs is not None:
    cfg_from_list(args.set_cfgs)

  print('Using config:')
  pprint.pprint(cfg)

  # set up loader
  data_json = osp.join('cache/prepro', opt['dataset_splitBy'], 'data.json')
  data_h5 = osp.join('cache/prepro', opt['dataset_splitBy'], 'data.h5')
  loader = GtMRCNLoader(data_json, data_h5)
  
  # set up model
  opt['vocab_size']= loader.vocab_size
  opt['C4_feat_dim'] = 512
  net = vgg16(opt, batch_size=1)
  
  
  net.create_architecture(81, tag='default',
                          anchor_scales=cfg.ANCHOR_SCALES,
                          anchor_ratios=cfg.ANCHOR_RATIOS)
  
  sfile = osp.join(opt['dataset_splitBy'], 'output_{}'.format(opt['output_postfix']), 'vgg16_faster_rcnn_iter_{}.pth'.format(opt['model_iter']))
  print('Restoring model snapshots from {:s}'.format(sfile))
  saved_state_dict = torch.load(str(sfile))
  count_1 = 0
  new_params = net.state_dict().copy()
  for name, param in new_params.items():
    #print(name, param.size(), saved_state_dict[name].size())
    if name in saved_state_dict and param.size() == saved_state_dict[name].size():
      new_params[name].copy_(saved_state_dict[name])
      #print('---- copy ----')
    else:
      print(name, '----')
      count_1 += 1
  print('size not match:', count_1)
  net.load_state_dict(new_params)
  
  net.eval()
  net.cuda()
  
  split = opt['split']
  
  crit = None
  acc, num_sent = eval_split(loader, net, crit, split, opt)
  print('Comprehension on %s\'s %s (%s sents) is %.2f%%' % \
        (opt['dataset_splitBy'], split, num_sent, acc*100.))
  
  # write to results.txt
  with open('experiments/det_results.txt', 'a') as f:
    f.write('[%s][%s], id[%s]\'s acc is %.2f%%\n' % \
            (opt['dataset_splitBy'], opt['split'], opt['id'], acc*100.0))
Example #9
def print_cluster_data(args):

    """loading precomputed fits"""
    from util import depickle_fits

    res = depickle_fits(args.file, suffix="cfits")

    """retrieving weightmatrix from fits dic"""
    W = res["map"]["W"]
    dic_W = res["map"]

    """get fit and cluster data"""
    cl = res["dic_cluster"]
    num_dead = cl["num_dead_rf"]
    alive = W.shape[0] - num_dead

    import pprint as pp

    print

    print "*****+ Trained map data"
    print "visible units", W.shape[1]
    print "hidden units", W.shape[0]
    print "alive hidden units", alive, "(", N.round(100.0 * alive / W.shape[0], 2), "% )"
    print "*****+ Map Parameter"
    pp.pprint([(key, dic_W[key]) if key != "W" else () for key in dic_W.keys()])

    print "*****+ Cluster data"
    print "num cluster", cl["num_clusters"]
    print "num rf in each cluster", cl["num_each_cluster"]
    print "prototype weights each cluster", N.round(cl["prototype_cluster"], 2)
    print "*****+ Cluster Parameters"
    pp.pprint(cl["args"])
Example #10
    def get_top_ten(self):
        topten = [
            'hotmail.com', 'gmail.com', 'yahoo.com', 'aol.com', 'comcast.com',
            'mail.ru', 'web.de', 'qq.com', 'gmx.net', 'naver.com'
        ]

        print "Domain\tTLS\tVerified\t"
        results = []
        for domain in topten:
            self.cur.execute("select id from domains where domain = '%s';" %
                             domain)
            dom_id = self.cur.fetchone()[0]
            self.cur.execute(
                "select mx_id from domains_mx where domain_id = %s;" % dom_id)
            mx_id = self.cur.fetchone()[0]
            self.cur.execute(
                "select server_id from mx_servers where mx_id = %s;" % mx_id)
            serv_id = self.cur.fetchone()[0]

            self.cur.execute("select tls from servers where id = %s;" %
                             serv_id)
            tls = self.cur.fetchone()[0]
            self.cur.execute(
                "select ssl_verified from servers where id = %s;" % serv_id)
            verified = self.cur.fetchone()[0]
            results.append([domain, tls, verified])

            #num_servers_tls = int(self.cur.fetchone()[0])
            #self.cur.execute("select count(*) from Servers where ssl_verified = 'True';")

        pprint.pprint(results)
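The queries above interpolate values straight into SQL, which breaks on quoting and invites injection; DB-API placeholders are the usual fix. A runnable sqlite3 miniature with an invented schema:

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("create table domains (id integer primary key, domain text)")
cur.execute("insert into domains (domain) values (?)", ("gmail.com",))
# the ? placeholder lets the driver handle quoting and escaping
cur.execute("select id from domains where domain = ?", ("gmail.com",))
print(cur.fetchone()[0])  # 1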
Example #11
def suggestion(dict, sample):
    # extract the process and center-of-mass energy from the sample name
    query = {
        "energy": get_energy(sample, "2016"),
        "process": get_process(sample).replace("/", "")
    }
    print "searching for previously known cross-sections with following query: "
    pprint.pprint(query)
    query_result = []
    best_guess = 0
    for sample, values in dict.iteritems():
        matches = True
        for name, attribute in values.iteritems():
            if name not in query: continue
            if re.match(str(query[name]), str(attribute)) is None:  # query[name] != attribute
                matches = False
        if matches and "xsec" in dict[sample]:
            query_result.append("\t" + make_nickname(values) + " : " +
                                str(dict[sample]["xsec"]))
            if best_guess == 0:
                best_guess = dict[sample]["xsec"]
            else:
                if best_guess != dict[sample]["xsec"]:
                    best_guess = -1  # differences, no recommendation found
    print "previously set: "
    pprint.pprint(query_result)
    return best_guess
Example #12
def main():
	parser = optparse.OptionParser(usage="usage: %prog [options]",
	                               description="Script to extend datasets.json by another sample")
	# sample
	parser.add_option("-s", "--sample", help="official sample string")
	# crosssection
	parser.add_option("-x", "--xsec", help="Sample cross section")


	(options, args) = parser.parse_args()
	sample = options.sample
	xsec = float(options.xsec)
	#xsec = eval_expr(xsec.replace('x', '*').replace(' ', ''))
	dict = database
	if "xsec" in dict[sample]:
		print "sample already has a cross-section of " + str(dict[sample]["xsec"]) + ". Stopping"
		sys.exit()
	best_guess = suggestion(dict, sample)
	if best_guess > 0 and xsec < 0:
		print "automatically fetching cross-section from previously known samples"
		xsec = best_guess
	if xsec <= 0.0:
		print "no proper cross-section defined. Stopping"
		sys.exit()
	dict[sample]["xsec"] = float(xsec)
	print "\n new datasets.json entry: "
	pprint.pprint( dict[sample])
	save_database(dict, dataset)
Example #13
    def upload_fastq(self, ctx, params):
        """
        :param params: instance of type "UploadFastqParams" (testing
           invocation of ReadsUtils) -> structure: parameter "fwd_id" of
           String, parameter "wsid" of Long, parameter "wsname" of String,
           parameter "objid" of Long, parameter "name" of String, parameter
           "rev_id" of String, parameter "sequencing_tech" of String
        :returns: instance of type "UploadFastqObjref"
        """
        # ctx is the context object
        # return variables are: objref
        #BEGIN upload_fastq
        print("hai this is upload_fastq here, params are")
        pprint.pprint(params)
        #ReadsUtils_instance = ReadsUtils(url=self.callbackURL, token=ctx['token'])
        #method_retVal = ReadsUtils_instance.upload_reads( params )
        objref = "Vooch"
        #END upload_fastq

        # At some point might do deeper type checking...
        if not isinstance(objref, basestring):
            raise ValueError('Method upload_fastq return value ' +
                             'objref is not type basestring as required.')
        # return the results
        return [objref]
Example #14
async def start_service(send_channel):
    async with oauth.authorizer(_watch_token) as authorize, \
               client.factory("www.googleapis.com", 443) as make_request, \
               send_channel:

        seen = set()

        while True:
            data = await client.json_auth_request_factory(
                partial(youtube.list_live_broadcasts_builder), authorize,
                make_request)

            import pprint
            pprint.pprint(data)

            async with trio.open_nursery() as chat_handlers:
                # Start a chat handler for each broadcast not yet seen
                for broadcast in data["items"]:
                    if broadcast["id"] in seen:
                        print("skipping", broadcast["id"], file=sys.stderr)
                        continue
                    print("starting chat handler for broadcast",
                          broadcast["id"],
                          file=sys.stderr)
                    seen.add(broadcast["id"])
                    chat_handlers.start_soon(
                        partial(
                            chat_handler,
                            live_chat_id=broadcast["snippet"]["liveChatId"],
                            authorize=authorize,
                            send_channel=send_channel,
                        ))
            await trio.sleep(60)
Example #15
def test_get_default_msfragger_3():
    up = uparma.UParma()
    default_params = up.get_default_params("msfragger_style_3")
    import pprint

    pprint.pprint(default_params)
    assert default_params["enzyme_specificity"]["translated_value"] == 2
Example #17
    def _distributor(self, label, cv, param, eval_metric, early_stopping_rounds=50):
        start = time()

        if self.is_classifier:
            label = 'XGBClassifier'
            rs = XGBClassifier(**param)
        else:
            label = 'XGBRegressor'
            rs = XGBRegressor(**param)

        X_visible, X_blind, y_visible, y_blind = \
            train_test_split(
                self.X_train, self.y_train, random_state=1301, stratify=self.y_train, test_size=0.4)

        rs.fit(X_visible, y_visible, eval_metric=eval_metric,
               early_stopping_rounds=early_stopping_rounds,
               eval_set=[(X_visible, y_visible), (X_blind, y_blind)])

        self.result[label] = {}
        self.result[label]['clf'] = rs
        # self.result[label]['score'] = rs.best_score_
        self.result[label]['time'] = time() - start
        # self.result[label]['set'] = ('n_iter: %s cv: %s' % (n_iter, cv))

        pprint.pprint(self.result[label])
        # pprint.pprint(rs.grid_scores_)

        out_result = open(self.result_address, 'wb')
        pickle.dump(self.result, out_result)
        out_result.close()
Example #18
def shops_in_sf():
    output = cur.execute('SELECT value, COUNT(*) as num FROM nodes_tags\
                            WHERE key="shop" \
                            GROUP BY value \
                            ORDER BY num DESC' )
    rows = output.fetchall()
    pprint.pprint(rows)
    return rows
Example #19
    def json_put(self, _req_url, _data, debug_=False, response_json_list_=None):
        """
        Send a PUT request. This is presently used for data sent to /status-msg for
        creating a new messaging session. It is not presently used for CLI scripting
        so lacks suppress_x features.
        :param _req_url: url to put
        :param _data: data to place at URL
        :param debug_: enable debug output
        :param response_json_list_: array for json results in the response object, (alternative return method)
        :return: http response object
        """
        json_response = None
        try:
            lf_r = LFRequest.LFRequest(self.lfclient_url, _req_url, debug_=self.debug, die_on_error_=self.exit_on_error)
            lf_r.addPostData(_data)
            if debug_ or self.debug:
                LANforge.LFUtils.debug_printer.pprint(_data)
            json_response = lf_r.json_put(show_error=self.debug,
                                          debug=(self.debug or debug_),
                                          response_json_list_=response_json_list_,
                                          die_on_error_=self.exit_on_error)
            if debug_ and (response_json_list_ is not None):
                pprint.pprint(response_json_list_)
        except Exception as x:
            if self.debug or self.halt_on_error or self.exit_on_error:
                print("json_put submitted to %s" % _req_url)
                pprint.pprint(_data)
                print("Exception %s:" % x)
                traceback.print_exception(Exception, x, x.__traceback__, chain=True)
            if self.halt_on_error or self.exit_on_error:
                exit(1)
        return json_response
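For comparison, the same PUT-JSON-and-parse-the-response flow written with only the standard library; this is a generic sketch, not the LANforge client API, and the endpoint is a placeholder:

import json
import pprint
import urllib.request

def simple_json_put(url, data):
    # serialize, send as an HTTP PUT, then parse the JSON response body
    body = json.dumps(data).encode("utf-8")
    req = urllib.request.Request(url, data=body, method="PUT",
                                 headers={"Content-Type": "application/json"})
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read().decode("utf-8"))

# needs a live server, hence commented out:
# pprint.pprint(simple_json_put("http://localhost:8080/status-msg",
#                               {"action": "new_session"}))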
Example #20
def register_new_sample(dict, options):
	# split sample name
	pd_name, details, filetype = options.sample.strip("/").split("/")
	new_entry = {}
	sample = options.sample
	if(sample in dict):
		new_entry = dict[sample]
	else:
		new_entry = {}
	if options.dbs:
		new_entry["inputDBS"]=options.dbs
	new_entry["data"]      = is_data(details, options.data)
	new_entry["embedded"]  = is_embedded(filetype)
	
	new_entry["energy"]    = get_energy(pd_name, details, data = new_entry["data"], isembedded=new_entry["embedded"])
	new_entry["campaign"]  = get_campaign(details, energy=new_entry["energy"])
	new_entry["scenario"]  = get_scenario(details, energy=new_entry["energy"], data=new_entry["data"])
	new_entry["generator"] = get_generator(pd_name, data=new_entry["data"])
	new_entry["process"]   = get_process(pd_name)
	new_entry["format"]    = get_format(filetype, default=None)
	new_entry["n_events_generated"]    = get_n_generated_events(sample, options.dbs)
	new_entry["n_files"]               = get_n_files(sample, options.dbs)
	new_entry["extension"] = get_extension(details)
	if options.xsec != "":
		new_entry["xsec"] = float(options.xsec)
	pprint.pprint(new_entry)
	print "The nickname will be: "
	print make_nickname(new_entry)
	check_nickname_unique(make_nickname(new_entry))
	dict[sample] = new_entry
	return dict
Example #21
def main():
    parser = optparse.OptionParser(
        usage="usage: %prog [options]",
        description="Script to extend datasets.json by another sample")
    # sample
    parser.add_option("-s", "--sample", help="official sample string")
    # crosssection
    parser.add_option("-x", "--xsec", help="Sample cross section")

    (options, args) = parser.parse_args()
    sample = options.sample
    xsec = float(options.xsec)
    #xsec = eval_expr(xsec.replace('x', '*').replace(' ', ''))
    dict = database
    if "xsec" in dict[sample]:
        print "sample already has a cross-section of " + str(
            dict[sample]["xsec"]) + ". Stopping"
        sys.exit()
    best_guess = suggestion(dict, sample)
    if best_guess > 0 and xsec < 0:
        print "automatically fetching cross-section from previously known samples"
        xsec = best_guess
    if xsec <= 0.0:
        print "no proper cross-section defined. Stopping"
        sys.exit()
    dict[sample]["xsec"] = float(xsec)
    print "\n new datasets.json entry: "
    pprint.pprint(dict[sample])
    save_database(dict, dataset)
Example #22
File: igor.py Project: lulzzz/paul
def main_read_info (options):
    """
    IBW -> ASCII conversion
    """
    log.debug ("reading %s..." % options.info)
    wav = wave_read (options.info)
    pprint.pprint (wav.info)
Example #23
def save_data(dataset):
    new_dataset = []

    for i, x in enumerate(dataset):
        sentence = x['sentenceTokens']
        verbs = []
        etc = []
        
        print('NEW SOURCE SENTENCE')
        #print(sentence)
        
        for vkey in x['verbEntries'].keys():
            z = x['verbEntries'][vkey]['questionLabels']
            verbs.append([sentence[int(vkey)]])
            for qkey in z.keys():
                verbs[-1].append([qkey])
                for qa in z[qkey]['answerJudgments']:
                    if qa['isValid']:
                        verbs[-1][-1].append(qa['spans'][0])
                        
        pprint.pprint(verbs)
        print(sentence)
        
        print()
        if i == 3:
            break
Example #24
    def get_top_ten(self):
        topten = ['hotmail.com', 'gmail.com', 'yahoo.com', 'aol.com', 
                'comcast.com', 'mail.ru', 'web.de', 'qq.com', 'gmx.net',
                'naver.com']
        
        print "Domain\tTLS\tVerified\t"
        results = []
        for domain in topten:
            self.cur.execute("select id from domains where domain = '%s';" % domain)
            dom_id = self.cur.fetchone()[0]
            self.cur.execute("select mx_id from domains_mx where domain_id = %s;" % dom_id)
            mx_id = self.cur.fetchone()[0]
            self.cur.execute("select server_id from mx_servers where mx_id = %s;" % mx_id)
            serv_id = self.cur.fetchone()[0]

            self.cur.execute("select tls from servers where id = %s;" % serv_id)
            tls = self.cur.fetchone()[0]
            self.cur.execute("select ssl_verified from servers where id = %s;" % serv_id)
            verified = self.cur.fetchone()[0]
            results.append([domain, tls, verified])

            #num_servers_tls = int(self.cur.fetchone()[0])
            #self.cur.execute("select count(*) from Servers where ssl_verified = 'True';")

        pprint.pprint(results)
Example #25
    def test_pytorch_linear_nn(self):
        """Test linear pytorch model training and UDF application"""

        model = SimpleNetwork()

        MachineLearningPytorchTestCase.train_pytorch_model(model=model)

        dir = os.path.dirname(openeo_udf.functions.__file__)
        file_name = os.path.join(dir, "datacube_pytorch_ml.py")
        udf_code = UdfCodeModel(language="python",
                                source=open(file_name, "r").read())

        temp = create_datacube(name="temp",
                               value=1,
                               dims=("x", "y"),
                               shape=(2, 2))

        ml = MachineLearnModelConfig(
            framework="pytorch",
            name="linear_model",
            description=
            "A pytorch model that adds two numbers in range of [1,1]",
            path="/tmp/simple_linear_nn_pytorch.pt")
        udf_data = UdfData(proj={"EPSG": 4326},
                           datacube_list=[temp],
                           ml_model_list=[ml])
        run_user_code(code=udf_code.source, data=udf_data)
        pprint.pprint(udf_data.to_dict())
Example #26
def runTest():
    print("Test script:")
    import pprint
    print("Component tree of all installed components:")
    pprint.pprint(getAllComponents_tree())
    print("")
    test_cfg = RHNOptions(sys.argv[1])
#    test_cfg = RHNOptions('server.app')
#    test_cfg = RHNOptions('proxy.broker')
#    test_cfg = RHNOptions('proxy.redirect', _CONFIG_ROOT)
#    test_cfg = RHNOptions('proxy.redirect', '/tmp')
#    test_cfg.filename = 'empty.conf'
    test_cfg.parse()
    print("=============== the object's repr ================================")
    print(test_cfg)
    print("=============== the object's defaults ============================")
    pprint.pprint(test_cfg.getDefaults())
    print("=============== an erroneous lookup example ======================")
    print("testing __getattr__")
    try:
        print(test_cfg.lkasjdfxxxxxxxxxxxxxx)
    except AttributeError:
        e = sys.exc_info()[1]
        print('Testing: "AttributeError: %s"' % e)
    print("")
    print("=============== the object's merged settings ======================")
    test_cfg.show()
    print("=============== dump of all relevant dictionaries =================")
    test_cfg.showall()
    print("===================================================================")
Example #27
    def handle( self, *args, **options ):

        def enrich( obj ):
            #s = unicode( obj ).lower()
            s = obj.strip().lower()
            # simple stop words
            s = re.sub( r"\b(the|of|in|a)\b", "", s, flags=re.IGNORECASE )
            # type prefixes
            s = re.sub( r"^(trailer|review|report|screenshots|video):\s*", "", s, flags=re.IGNORECASE )
            return s

        trigrams = defaultdict(int)

        for ln in sys.stdin:
            n = NGram( warp=2.5, iconv=enrich, N=3 )
            s = n.iconv( ln )
            s = n.pad( s )
            for gram in n.ngrams( s ):
                trigrams[gram] += 1

        
        max_idf = math.sqrt(math.log(float(len(trigrams))/1.0))
        #print len(trigrams)
        #return
        # set weights to sqrt(log(N/n))
        for word in trigrams.keys():
            # 1.0 constant to account for missing words
            #trigrams[word] = math.sqrt(math.log(float(len(trigrams))/(1.0+trigrams[word])))
            # normalize
            trigrams[word] = math.sqrt(math.log(float(len(trigrams))/(1.0+trigrams[word])))/max_idf

        # (missing trigram) value
        trigrams['$$$'] = 1.0
        import pprint            
        pprint.pprint( sorted(trigrams.iteritems(), key=operator.itemgetter(1)) )
Example #28
def describe_tweets(input_json_file):
    ## count things
    data = []
    k = j = 0
    with open(input_json_file,'r') as jsonFile:
        for tweet in jsonFile:
            jObj = json.loads(tweet)
            
            ## filter tweets for those that are in English
            if jObj.get('lang') == 'en':
                ## pop'n the dictionary
                twt_stats ={
                    'usr_ment_cnt': len(jObj.get('entities',{}).get('user_mentions', {})),
                    'url_cnt'     : len(jObj.get('entities',{}).get('urls',{})),
                    'hstgs_cnt'   : len(jObj.get('entities',{}).get('hashtags',{})),
                    'rt_cnt'      : jObj.get('retweet_count'),
                    'fv_cnt'      : jObj.get('favorite_count')
                }
                pprint.pprint(twt_stats)
                data.append(twt_stats)
                k +=1
            else:
                j +=1
    #print np.size(data)
    print k , 'tweets processed'
    not_en = (float(j) / (j + k)) * 100.0
    print '%.2f percent of tweets are not English' % (not_en)
    return data
Example #29
    def error(self, exception):
        # print("lfcli_base error: %s" % exception)
        pprint.pprint(exception)
        traceback.print_exception(Exception, exception, exception.__traceback__, chain=True)
        if self.halt_on_error:
            print("halting on error")
            sys.exit(1)
Example #30
    def Package_pay(self):
        """Payment report request of the analytics package: https://ssl08.haloapps.com/p/ggdirect"""
        event = Reqspon.Recode(
            recode_file="doublewin.har",
            read_file="doublewin.yml",
            yaml_name="/p/ggdirect",
            yaml_url="https://ssl08.haloapps.com/p/ggdirect")
        """
        The same URL shows up in many captured sessions; pick out the requests that match.
        """
        Session = []
        for id, data in enumerate(event[0:]):
            session = event[id]
            Session.append(session)
            #if data["request"]["json"]["eventName"] == "af_purchase":  # multiple sessions share the same URL
            # pprint(event[id])
            #session = event[id]
            #Session.append(session)
        pprint.pprint(Session)
        idn = 0  # index into the identical records
        revent = Session[idn]  # take the first request after filtering
        url = revent["request"]["url"]
        data = revent["request"]["data"]
        headers = revent["request"]["headers"]
        method = revent["request"]["method"]
        res = requests.post(url=url, data=data, headers=headers)
        ##############
        # record the current time; indices start at 1
        now = int(time.time())  # as a Unix timestamp
        timeArray = time.localtime(now)
        currentTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
        #########################
        respon_status_code = res.status_code  # response status code
        respon_headers = res.headers  # response headers
        respon_body = json.loads(res.text)  # response body
        respon_cookies = res.cookies
        respon_history = res.history
        print(respon_body)
        print(respon_status_code, respon_headers, respon_cookies, respon_body,
              respon_history)
        if respon_status_code == 200:
            """Parse the response data and build the tabular output"""
            idnum = idn + 1  # sequence number
            code = respon_status_code
            message = respon_body["message"]  # response message
            product_id = data["product_id"]  # product ID
            player_id = data["cp_player_id"]  # user account
            userid = data["device_id"]  # user ID
            game_version = data["product_version"]  # game version
            sdk_version = "None"  # SDK version
            androidid = "None"  # Android ID

            event_list = [
                idnum, code, message, product_id, player_id, userid,
                game_version, sdk_version, androidid, currentTime
            ]
            print(event_list)

        return event_list
Example #31
    def Accountkit(self):
        """Login report request of the analytics package: https://accountkit.haloapps.com/v1.0/account/trace"""
        event = Reqspon.Recode(
            recode_file="doublewin.har",
            read_file="doublewin.yml",
            yaml_name="/v1.0/account/trace",
            yaml_url="https://accountkit.haloapps.com/v1.0/account/trace")
        """
        The same URL shows up in many captured sessions; pick out the requests that match.
        """
        Session = []
        for id, data in enumerate(event[0:]):
            session = event[id]
            Session.append(session)
            #if data["request"]["json"]["eventName"] == "af_purchase":  # multiple sessions share the same URL
            # pprint(event[id])
            #session = event[id]
            #Session.append(session)
        print(id)
        pprint.pprint(Session)
        idn = 0  # index into the identical records
        revent = Session[idn]  # take the first request after filtering
        url = revent["request"]["url"]
        data = revent["request"]["data"]
        headers = revent["request"]["headers"]
        method = revent["request"]["method"]
        res = requests.post(url=url, data=data, headers=headers)
        ##############
        # record the current time; indices start at 1
        now = int(time.time())  # as a Unix timestamp
        timeArray = time.localtime(now)
        currentTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
        #########################
        respon_status_code = res.status_code  # response status code
        respon_headers = res.headers  # response headers
        respon_body = json.loads(res.text)  # response body
        respon_cookies = res.cookies
        respon_history = res.history
        print(respon_body)
        print(respon_status_code, respon_headers, respon_cookies, respon_body,
              respon_history)
        if respon_status_code == 200:
            """Parse the response data and build the tabular output"""
            idnum = idn + 1  # sequence number
            code = respon_status_code  # request status code
            message = respon_body["message"]  # response message
            vid = respon_body["data"]["vid"]  # vid
            player_id = data["gameAccountId"]  # player account / user identity
            userid = data["staToken"]  # user ID
            token = data["thirdPartyToken"]  # third-party token
            package_name = data["packageName"]  # package name
            platform = data["platform"]  # platform
            event_list = [
                idnum, code, message, vid, player_id, userid, token,
                package_name, platform, currentTime
            ]
            print(event_list)

        return event_list
Example #32
def main(files):
    import pprint
    if not files: raise Exception("usage: %s foo.html [bar.html ...]" % sys.argv[0])
    for fname in files:
        print "%s as person:" % fname
        pprint.pprint(scrape_person(fname))
        print "%s as state:" % fname
        pprint.pprint(scrape_state(fname))
Example #33
    def write(self, obj):
        """Serialize."""
        from io import StringIO
        output = StringIO()
        pprint.pprint(obj, stream=output, indent=4)
        contents = output.getvalue()
        output.close()
        return contents
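Note that pprint.pformat returns the formatted text directly, so the StringIO buffer above is only needed when an explicit stream is required:

import pprint

obj = {"b": [1, 2, 3], "a": {"nested": True}}
contents = pprint.pformat(obj, indent=4)  # equivalent result, no buffer
print(contents)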
Example #35
File: igor.py Project: lulzzz/paul
def main_pack_list (options):
    '''
    Lists contents of Igor Packed file
    '''
    log.debug ("Scanning %s..." % options.list)
    pack_tree = pack_scan_tree (options.list)
    print "Pack Tree:"
    pprint.pprint (pack_tree)
Example #36
    def printData(self):
        vehicle_info = {
            'vid': self.vid,
            'VDI': self.VDI,
            'VTI': self.VTI,
            'RTI': self.RTI
        }
        pprint.pprint(vehicle_info, width=20, indent=2)
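width and indent are standard pprint parameters: a narrow width forces one entry per line. A quick runnable illustration with literal values:

import pprint

info = {'vid': 1, 'VDI': 'x', 'VTI': 'y', 'RTI': 'z'}
pprint.pprint(info, width=20, indent=2)
# { 'RTI': 'z',
#   'VDI': 'x',
#   'VTI': 'y',
#   'vid': 1}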
Example #37
def getDates(client):
    startDATE = os.getenv('startDATE', "")
    endDATE = os.getenv('endDATE', "")

    if startDATE == "":
        print '>>> Calculating StartDATE '
        date_query = " select max(date) as date from " + DATASET_ID + "." + MASTER_TABLE_ID
        print(">>> " + date_query)
        date_query_job = client.query(date_query)
        try:
            results = date_query_job.result()
            print '>>> Executed the max Date query '
            print '>>> Fetching the date row,if any '
            for row in results:
                if row.date is None:
                    startDATE = ""
                else:
                    startDATE = str(row.date)
                    startDATE = datetime.datetime.strptime(startDATE, '%Y%m%d').strftime("%Y-%m-%d")
            if startDATE == "":
                print '>>> No max Date row '
                yesterDate = (datetime.datetime.now() - datetime.timedelta(2)).strftime('%Y-%m-%d')
                startDATE = yesterDate
            else:
                print '>>> setting the startDATE to next day of last updated data'
                tempDATE = str(datetime.datetime.strptime(startDATE, "%Y-%m-%d") - datetime.timedelta(2))
                print tempDATE
                startDATE = str(datetime.datetime.strptime(tempDATE, '%Y-%m-%d 00:00:00').strftime("%Y-%m-%d"))
                print '>>> startDate is: ' + startDATE
        except Exception as e:
            pprint.pprint(">>> Failed to get Max Date from the Master Table %s " % e)
            type_, value_, traceback_ = sys.exc_info()
            pprint.pprint(traceback.format_tb(traceback_))
            print(type_, value_)
            sys.exit(1)
    else:
        print '>>> Setting startDATE from Jenkins parm'
        startDATE = os.environ['startDATE']

    if endDATE == "":
        print '>>> Going to calculate endDate'
        yesterDate = (datetime.datetime.now() - datetime.timedelta(2)).strftime('%Y-%m-%d')
        endDATE = yesterDate
    else:
        print '>>> Setting the endDATE from Jenkins parm'
        endDATE = os.environ['endDATE']

    startDATE = str(startDATE)
    endDATE = str(endDATE)

    if startDATE > endDATE:
        print '>>> startDATE greater than endDATE ?? I am setting both as endDATE'
        startDATE = endDATE

    print '>>> startDate : ' + startDATE
    print '>>> endDate: ' + endDATE

    return [startDATE, endDATE]
Example #38
    def push(self, message: dict):
        if message['type'] in self.messageTypes:
            logging.debug("MESSAGE RECEIVED")
            pprint.pprint(message)
            messageType = self.messageTypes[message['type']]
            message = messageType(message)
            self.signal.emit(message)
        else:
            logging.debug("INVALID MESSAGE TYPE: %s", message['type'])
Example #39
    def test_user_feeds(self):
        feeds = self.user.get_feeds()
        self.assertIsNotNone(feeds)
        for item in feeds['resource']['link']:
            print "%s => %s" % (item['title'], item['href'])
            try:
                #pass
                dump_object(self.user.get_resource(item['href']))
            except NetflixError as e:
                pprint.pprint(e)
Example #40
    def test_relativize(self):
        rel = relativize_datestamps(TASK_DEF)
        import pprint

        pprint.pprint(rel)
        assert rel["created"] == {"relative-datestamp": "0 seconds"}
        assert rel["deadline"] == {"relative-datestamp": "86400 seconds"}
        assert rel["expires"] == {"relative-datestamp": "31536001 seconds"}
        assert rel["payload"]["artifacts"]["public"]["expires"] == {
            "relative-datestamp": "31536000 seconds"
        }
Example #41
    def fit_all_models(self, pipe=True, cv=4, n_iter=10):
        for label in self.dicts.keys():
            print('Check %s' % label)
            # send label to distributer if not yet estimated
            if label not in self.result.keys():
                if pipe:
                    self._piped_distributer(label, cv, n_iter)
                else:
                    self._distributer(label, cv, n_iter)
            else:
                pprint.pprint('score: %0.2f' % self.result[label]['score'])
Example #42
def index():
    # Auth Step 1: Authorization
    url_args = "&".join([
        "{}={}".format(key, urllib.request.quote(val))
        for key, val in auth_query_parameters.items()
    ])
    auth_url = "{}/?{}".format(SPOTIFY_AUTH_URL, url_args)
    return redirect(auth_url)

    #@app.route("/callback/q")
    #def callback():
    # Auth Step 4: Requests refresh and access tokens
    auth_token = request.args['code']
    code_payload = {
        "grant_type": "authorization_code",
        "code": str(auth_token),
        "redirect_uri": REDIRECT_URI
    }
    base64encoded = base64.b64encode("{}:{}".format(CLIENT_ID, CLIENT_SECRET))
    headers = {"Authorization": "Basic {}".format(base64encoded)}
    post_request = requests.post(SPOTIFY_TOKEN_URL,
                                 data=code_payload,
                                 headers=headers)

    # Auth Step 5: Tokens are Returned to Application
    response_data = json.loads(post_request.text)
    access_token = response_data["access_token"]
    refresh_token = response_data["refresh_token"]
    token_type = response_data["token_type"]
    expires_in = response_data["expires_in"]

    # Auth Step 6: Use the access token to access Spotify API
    authorization_header = {"Authorization": "Bearer {}".format(access_token)}

    Requests = requests.get('https://api.spotify.com/v1/me/top/tracks',
                            headers=authorization_header)
    r = Requests.json()
    pprint.pprint(r)

    # Get profile data
    user_profile_api_endpoint = "{}/me".format(SPOTIFY_API_URL)
    profile_response = requests.get(user_profile_api_endpoint,
                                    headers=authorization_header)
    profile_data = json.loads(profile_response.text)

    # Get user playlist data
    playlist_api_endpoint = "{}/playlists".format(profile_data["href"])
    playlists_response = requests.get(playlist_api_endpoint,
                                      headers=authorization_header)
    playlist_data = json.loads(playlists_response.text)

    # Combine profile and playlist data to display
    display_arr = [profile_data] + playlist_data["items"]
Example #43
def test():

    tags = count_tags('example.osm')
    pprint.pprint(tags)
    assert tags == {'bounds': 1,
                     'member': 3,
                     'nd': 4,
                     'node': 20,
                     'osm': 1,
                     'relation': 1,
                     'tag': 7,
                     'way': 1}
Example #44
def main():
    updater = Updater(os.environ['BOT_TOKEN'])

    updater.dispatcher.add_handler(
        InteractiveHandler(
            start_game,
            entry_points=[
                CommandHandler('start_game', None),
            ],
            fallbacks=[
                CommandHandler('cancel', cancel_game),
                CallbackQueryHandler(info_button, pattern=r'^info$'),
            ],
            per_chat=True,
            per_user=False,
            per_message=False,
        ))

    updater.dispatcher.add_handler(CommandHandler('help', help))

    webhook_port = os.environ.get('WEBHOOK_PORT')
    if webhook_port:
        sys.stdout.flush()

        webhook_url = os.environ.get(
            'URL_PREFIX', 'https://%s:%s/' % (
                os.environ['WEBHOOK_FQDN'],
                webhook_port,
            )) + 'api/' + os.environ['BOT_TOKEN']

        updater.start_webhook(
            listen=os.environ.get('WEBHOOK_LISTEN', '0.0.0.0'),
            port=int(webhook_port),
            url_path='api/' + os.environ['BOT_TOKEN'],
            key=os.environ.get('WEBHOOK_KEY'),
            cert=os.environ.get('WEBHOOK_CERT'),
            webhook_url=webhook_url,
            clean=True,
        )

        info = updater.bot.get_webhook_info()
        print('Webhook info:')
        import pprint
        pprint.pprint(str(info))

    else:
        updater.start_polling(clean=True, timeout=10)

    print('Blood Bound Bot ready for serving!')
    sys.stdout.flush()

    updater.idle()
Example #45
def optimise_smallest_distance_with_intensity(enders, starters, end, intense):

    import itertools
    import numpy as np
    import pprint as pp

    if len(starters) > 2 * len(enders):

        # exit(f'Too many daughters: {len(starters)} daughters for {len(enders)} parents')
        enders += ['blank']

    if len(starters) != 2 * len(enders):
        starters += ['blank'] * (2 * len(enders) - len(starters))

    total_dis_dic = {}

    # print(list(itertools.permutations(starters, 4)), enders)
    loop = 0

    for startCells in itertools.permutations(starters, 4):
        tot_dis = 0
        loop += 1

        for endCell, stCellgroup in zip(
                enders, np.array_split(startCells, len(enders))):

            for stC in stCellgroup:

                if stC == 'blank' or endCell == 'blank':
                    tot_dis += 10000
                    continue

                stx, sty, stz = pos_dic[end + 1][stC]
                endx, endy, endz = pos_dic[end][endCell]

                dis = (stx - endx)**2 + (sty - endy)**2 + (stz - endz)**2

                tot_dis += dis

        total_dis_dic[tot_dis] = (enders, startCells)
    pp.pprint(total_dis_dic)

    min_dis = min(total_dis_dic)

    linked = []
    ender_Cells, start_Cells = total_dis_dic[min_dis]
    for endCell, stCellgroup in zip(
            ender_Cells, np.array_split(start_Cells, len(ender_Cells))):
        for stC in stCellgroup:
            linked.append((endCell, stC))

    return linked
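The function above enumerates every assignment of daughters to parents and keeps the cheapest; the same exhaustive-search shape in miniature, with invented one-dimensional positions instead of tracked cell coordinates:

import itertools

enders = [0.0, 10.0]              # two parents
starters = [9.5, 0.5, 10.5, 1.0]  # four daughters

def cost(assignment):
    # two starters per ender, summed squared distance
    return sum((s - e) ** 2
               for e, pair in zip(enders, (assignment[:2], assignment[2:]))
               for s in pair)

best = min(itertools.permutations(starters, 4), key=cost)
print(best)  # e.g. (0.5, 1.0, 9.5, 10.5)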
Example #46
def process_message(data, config={}):
    global ADMIN, database, slack
    slack = config.get('slack_client', None)
    ADMIN = config.get('ADMIN', '')
    database = config.get('database', None)
    channel_id = data['channel']
    channelname = slack.get_channelname(channel_id)
    user = {}
    user['id'] = get_user_id(data)
    if not user['id']:
        return

    conn = sqlite3.connect(database)
    c = conn.cursor()
    import pprint
    pprint.pprint(data)
    import sys
    sys.stdout.flush()
    if data['text'].startswith("!"):
        freq_table = 'cmd_freq'
    else:
        freq_table = 'chat_freq'
    c.execute(
        '''create table if not exists {} (user TEXT PRIMARY KEY, count INT)'''.
        format(freq_table))
    c.execute(
        '''create table if not exists friends (id INTEGER PRIMARY KEY AUTOINCREMENT, user_a TEXT, user_b TEXT, UNIQUE (user_a, user_b) ON CONFLICT IGNORE)'''
    )
    c.execute(
        '''create table if not exists coins (user TEXT PRIMARY KEY, coins INTEGER DEFAULT 0)'''
    )
    c.execute(
        '''create table if not exists pokemons (id INTEGER PRIMARY KEY AUTOINCREMENT, user TEXT, race INTEGER, level INTEGER, exp INTEGER, i_hp INTEGER, i_atk INTEGER, i_def INTEGER, i_satk INTEGER, i_sdef INTEGER, i_spd INTEGER)'''
    )
    conn.commit()

    user['name'] = slack.get_username(user['id'])
    print("[general] msg: {} from username: {}, channel: {} ({})".format(
        data['text'].encode('utf8'), user['name'], channelname, channel_id))

    if data['text'].startswith("!"):
        msgs = data['text'].split(" ")
        if len(msgs) == 2:
            cmd = msgs[0]
            target = msgs[1]
            binary_command(cmd, target, channel_id, user, conn)
        elif len(msgs) == 1:
            cmd = msgs[0]
            unary_command(cmd, channel_id, user, conn)

    update_freq(data['text'], user['name'], conn)
    conn.close()
Example #47
def show_invoice(request):
    invoice_data = {}
    client = MongoClient('mongodb://localhost:27017/')
    db = client['invoices']
    collection = db['invoices']
    invoice_data = collection.find_one({"_id": ObjectId(request.matchdict['invoice_id'])})
    print invoice_data
    collection = db['core_data']
    import pprint
    invoice_data['core_data'] = collection.find_one({})
    pprint.pprint(invoice_data['core_data'])
    pprint.pprint(invoice_data['core_data']['company_name'])
    return invoice_data
Example #48
    def _piped_distributer(self, label, cv, n_iter):

        normal = Normalizer()
        pca = PCA()
        selection = SelectKBest(k='all')
        features = FeatureUnion([('reduction', pca), ('selection', selection)])
        clf = self.dicts[label]['clf']

        pipeline = Pipeline([('scale', normal),
                             ('features', features),
                             ('clf', clf)])

        piped_param = {}
        piped_param['features__reduction__n_components'] = [5]
        for (name, value) in self.dicts[label]['param'].items():
            piped_param['clf__' + name] = value

        try:
            print ('try RandomizedSearchCV...')
            start = time()
            rs = RandomizedSearchCV(estimator=pipeline,
                                    param_distributions=piped_param,
                                    scoring=self.scoring,
                                    cv=cv,
                                    n_jobs=-1,
                                    n_iter=n_iter)

        # else grid search cv
        except ValueError:
            print ('try GridSearchCV...')
            start = time()
            rs = GridSearchCV(estimator=pipeline,
                              param_grid=piped_param,
                              scoring=self.scoring,
                              cv=cv,
                              n_jobs=-1)

        rs.fit(self.X_train, self.y_train)

        self.result[label] = {}
        self.result[label]['clf'] = rs.best_estimator_
        self.result[label]['score'] = rs.best_score_
        self.result[label]['time'] = time() - start
        self.result[label]['set'] = ('n_iter: %s cv: %s' % (n_iter, cv))

        pprint.pprint(self.result[label])
        pprint.pprint(rs.grid_scores_)

        out_result = open(self.result_address, 'wb')
        pickle.dump(self.result, out_result)
        out_result.close()
Example #49
    def test_get_about(self):

        return

        entry_id_1 = u'11EIs1ZxCykme0FnAdY8Xm_ktUCQ9y5lHC3EwAKFsiFk'
        entry1 = EntryCache.get_instance().cache.get(entry_id_1)
#        result = PathRelations.get_instance().register_entry(entry1)

        entry_id_2 = u'0AJFt2OXeDBqSUk9PVA'
#        entry2 = EntryCache.get_instance().cache.get(entry_id_2)
#        result = PathRelations.get_instance().register_entry(entry2)

        path_relations = PathRelations.get_instance()

        #print(len(entry.parents))
#        path_relations.dump_ll()

#        print(AccountInfo().root_id)

#        path_relations.dump_entry_clause('0AJFt2OXeDBqSUk9PVA')
#        PathRelations.get_instance().dump_entry_clause('0B5Ft2OXeDBqSSmdIek1aajZtVDA')
#        return
#        entry_clause = path_relations.get_entry_clause_by_id(entry_id_1)
        #result = path_relations.find_path_components_goandget('/')

        result = path_relations.get_child_filenames_from_entry_id(entry_id_2)

        from pprint import pprint
        pprint(result)

#        result = EntryCache.get_instance().cache.get(u'11EIs1ZxCykme0FnAdY8Xm_ktUCQ9y5lHC3EwAKFsiFk')
#        result = EntryCache.get_instance().cache.get(u'11EIs1ZxCykme0FnAdY8Xm_ktUCQ9y5lHC3EwAKFsiFk')
#        result = EntryCache.get_instance().cache.get(u'11EIs1ZxCykme0FnAdY8Xm_ktUCQ9y5lHC3EwAKFsiFk')
#        result = EntryCache.get_instance().cache.get(u'11EIs1ZxCykme0FnAdY8Xm_ktUCQ9y5lHC3EwAKFsiFk')
#        print(result)
        return
        

#        result = AccountInfo().root_id

        #about = drive_proxy('get_about_info')

#        entries = drive_proxy('get_children_under_parent_id', parent_id=about.root_id)
        #entries = drive_proxy('get_parents_over_child_id', child_id=u'11EIs1ZxCykme0FnAdY8Xm_ktUCQ9y5lHC3EwAKFsiFk')


#        print(response[u'rootFolderId'])
        import pprint
#        pprint.pprint(response[u'importFormats'])
        pprint.pprint(result)
Example #50
    def write(self, output, resources, notebook_name='notebook', **kw):
        """
        Consume and write Jinja output.

        See base for more...
        """

        if 'outputs' in resources:
            print("outputs extracted from %s" % notebook_name)
            print('-' * 80)
            pprint.pprint(resources['outputs'], indent=2, width=70)
        else:
            print("No outputs extracted from %s" % notebook_name)
        print('=' * 80)
Example #51
def process_map(file_in, pretty = False):
    # You do not need to change this file
    file_out = "{0}.json".format(file_in)
    data = []
    with codecs.open(file_out, "w") as fo:
        for _, element in ET.iterparse(file_in):
            el = shape_element(element)
            if el:
                data.append(el)
                if pretty:
                    fo.write(json.dumps(el, indent=2)+"\n")
                else:
                    fo.write(json.dumps(el) + "\n")
    pprint.pprint(data)
    return data
Example #52
def test_execution():
    """ Method loads a sample corpus, executes the extraction
        and prints the state of the extractor for inspection.
    """
    #f = open('corpora/easy', 'r')
    #text = f.read()
    #f.close()

    from corpus import get_wiki_text
    text = get_wiki_text()

    extractor = C_NC_TermExtractor(text)
    extractor.compute_cnc()
    import pprint
    pprint.pprint(extractor.__dict__)
Example #53
def remove_matching_entries(dict, query): 
	remove_samples = []
	for sample, values in dict.iteritems():
		matches = True
		for name, attribute in values.iteritems():
			if name not in query: continue
			if re.match(str(query[name]), str(attribute)) is None:  # query[name] != attribute
				matches = False
		if matches:
			remove_samples.append(sample)
	print "removing the following samples: "
	pprint.pprint( remove_samples)
	for sample in remove_samples:
		del dict[sample]
	return dict
Example #54
def display_tree(tree):
    if nltk_is_available:
        count = 0
        for t in tree:
            cf = CanvasFrame()
            tc = TreeWidget(cf.canvas(), t)
            cf.add_widget(tc, 10, 10)
            count += 1
            fileName = "tree" + repr(count) + ".ps"
            cf.print_to_file(fileName)
            cf.destroy()
    else:
        count = 0
        for t in tree:
            count += 1
            fileName = "tree" + repr(count) + ".txt"
            # pprint's second argument is an open stream, not a file name
            with open(fileName, "w") as out:
                pprint.pprint(t, out)
Example #55
    def test_update_facebook_share_settings(self):
        # verify that the linked account was properly added
        linkedAccounts = self.showLinkedAccounts(self.token)
        self.assertEqual(len(linkedAccounts), 1)
        self.assertEqual(linkedAccounts['facebook']['service_name'], 'facebook')

        # update share settings: enable share stamps on facebook

        path = "account/linked/facebook/settings/update.json"
        data = {
            "oauth_token"   : self.token['access_token'],
            'service_name'   : 'facebook',
            'on'   : 'share_stamps,share_follows,share_todos,share_likes',
            }
        self.handlePOST(path, data)

        # verify that share settings were updated
        linkedAccounts = self.showLinkedAccounts(self.token)
        self.assertEqual(linkedAccounts['facebook']['share_settings']['share_stamps'], True)
        self.assertEqual(linkedAccounts['facebook']['share_settings']['share_follows'], True)
        self.assertEqual(linkedAccounts['facebook']['share_settings']['share_todos'], True)
        self.assertEqual(linkedAccounts['facebook']['share_settings']['share_likes'], True)

        data = {
            "oauth_token"   : self.token['access_token'],
            'service_name'   : 'facebook',
            'off'   : 'share_stamps,share_follows',
            }
        self.handlePOST(path, data)

        linkedAccounts = self.showLinkedAccounts(self.token)
        self.assertEqual(linkedAccounts['facebook']['share_settings']['share_stamps'], False)
        self.assertEqual(linkedAccounts['facebook']['share_settings']['share_follows'], False)
        self.assertEqual(linkedAccounts['facebook']['share_settings']['share_todos'], True)
        self.assertEqual(linkedAccounts['facebook']['share_settings']['share_likes'], True)

        path = "account/linked/facebook/settings/show.json"
        data = {
            "oauth_token"   : self.token['access_token'],
            "service_name"  : 'facebook',
            }
        result = self.handleGET(path, data)

        import pprint
        pprint.pprint(result)
Example #56
File: raxis.py Project: RAPD/RAPD
def main(args):
    """
    The main process docstring
    This function is called when this module is invoked from
    the commandline
    """

    print "main"

    if args.file:
        test_image = os.path.abspath(args.file)
    else:
        raise Exception("No test image input!")
        # test_image = ""

    header = read_header(test_image)

    pprint.pprint(header)
Example #57
def main():
	parser = optparse.OptionParser(usage="usage: %prog [options]",
	                               description="Script to extend datasets.json by another sample")
	# sample
	parser.add_option("-s", "--sample", help="official sample string")
	# crosssection
	parser.add_option("-x", "--xsec", help="Sample cross section")


	(options, args) = parser.parse_args()
	sample = options.sample
	xsec = options.xsec
	xsec = eval_expr(xsec.replace('x', '*').replace(' ', ''))
	dict = load_database(dataset)
	dict[sample]["xsec"] = float(xsec)
	import pprint
	pprint.pprint( dict[sample])
	save_database(dict, dataset)
Example #58
def test_basics():
    """Test the progress being made
    """
    code = get_code()

    insert_project(
        "Test Proj", # Name of test project
        "/home/wg",  # location of the project on the local file system
        "https://github.com/testus/test", # URL of project on web
        code # collection to insert this data into
        )

    for x in code.find():
        pprint.pprint(x)

    code.remove({"name":"Test Proj"})

    assert code.count() == 0