Example #1
 def get_disks(self):
     ''' Get disks info data '''
     try:
         disk_partitions = psutil.disk_partitions()
         # print(str(disk_partitions))
         # print(json.dumps(disk_partitions, default=lambda obj: obj.__dict__))
         temp_str = json.dumps(disk_partitions)
         print(temp_str)
         temp_obj = json.loads(temp_str)
         print(str(temp_obj))
         self.__save_data_to_json(temp_str)
     except Exception as e:
         self.__logger.exception(e)
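One caveat with the snippet above: psutil.disk_partitions() returns a list of namedtuples, which json.dumps serializes as plain JSON arrays, dropping the field names. A minimal sketch of one way to keep the names, using the namedtuple's own _asdict() method:

import json

import psutil

# namedtuples are tuples, so they encode as bare arrays; convert each
# partition to a dict first to keep field names in the JSON output
partitions = [p._asdict() for p in psutil.disk_partitions()]
print(json.dumps(partitions, indent=2))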
Example #2
def dump_for_hb_a(infile):
    dpl = DumpPresentationLayer(infile)
    dpl.get_mean_emotions_values()
    dpl.get_emotion_types_values()
    dpl.get_mean_emotion_types_values()

    average_tweet = dpl.get_average_tweet_for_group(dpl.emotions_values,
                                                    dpl.df, "worry")
    average_covid_tweet = dpl.get_average_tweet_for_group(
        dpl.emotions_values, dpl.df_covid_related_terms, "worry")
    data = pd.concat([
        dpl.emotion_types_values_covid_related_terms, dpl.emotion_types_values
    ], axis=1).to_dict(orient="index")
    data_series = []
    for key, value in data.items():
        data_series.append({
            "name": key,
            "x": [v / 100 for v in value.values()]
        })
    with open("../data/pl-hb-a.json", "w") as outfile:
        data = json.dumps({
            "y": [
                "An Average COVID-19 Tweet",
                "An Average Tweet",
            ],
            "comments": [f'"{average_covid_tweet}"', f'"{average_tweet}"'],
            "data_series": data_series,
        })
        outfile.write(data)
Example #3
 def test_plus_encode(self):
     # specify separators manually, because JSONEncoder receives its
     # separators from simplejson.dumps(), whose default differs from
     # the one used in jsonplus.dumps()
     self.assertEqual(
         simplejson.dumps(self.plus, sort_keys=True, separators=(',', ':'), cls=json.JSONEncoder), 
         json.dumps(self.plus, sort_keys=True))
Example #4
 def test_inf_representation(self):
     a = json.dumps(float("inf"))
     # TODO: we want this:
     #b = '{"__class__":"float","__value__":"inf"}'
     # unfortunately, with current version of simplejson, all we can get is:
     b = 'Infinity'
     self.assertEqual(b, a)
Example #5
 def test_django_money(self):
     m = Money(313, 'USD')
     dm = DjangoMoney(313, 'USD')
     obj = jsonplus.loads(jsonplus.dumps(dm))
     self.assertEqual(obj, dm)
     self.assertTrue(hasattr(obj, 'is_localized'))
     self.assertTrue(hasattr(dm, 'is_localized'))
     self.assertFalse(hasattr(m, 'is_localized'))
Example #6
 def test_user_encoder_compat(self):
     @json.encoder('mytype', exact=False)
     def mytype_encoder(obj):
         return obj.y
     class mytype(object):
         y = 313
     self.assertEqual(json.dumps(mytype(), sort_keys=True, exact=False),
                      '313')
Example #7
    def test_user_encoder_exact(self):
        class mytype1(object):
            x = 313

        @jsonplus.encoder('mytype1', exact=True)
        def mytype_encoder(obj):
            return obj.x

        self.assertEqual(jsonplus.dumps(mytype1(), sort_keys=True, exact=True),
                         '{"__class__":"mytype1","__value__":313}')
Example #8
    def test_user_encoder_compat(self):
        class mytype1(object):
            x = 313

        @jsonplus.encoder('mytype1', exact=False)
        def mytype_encoder(obj):
            return obj.x

        self.assertEqual(
            jsonplus.dumps(mytype1(), sort_keys=True, exact=False), '313')
Example #9
	def record(self, tick: int, event: GameEvent):
		self.file.write(f"{tick}: ")

		if isinstance(event, GameSysAction):
			self.file.write(f"{event.action.value} ")
			self.file.write(jsonplus.dumps(event.payload))
		
		elif isinstance(event, PlayerMove):
			self.file.write(f"{event.pid} {event.action.value}")

		self.file.write("\n")
		self.file.flush()
Example #10
    def list_accounts_from_parent(
    ):  # creates a new file with account info. origin: aws

        with open('accounts_from_parent.txt', 'w'):
            pass  # truncates the file for each run
        account = boto3.session.Session(
            profile_name='default',
            region_name='eu-west-1')  # Session = Default (Organization access)
        client = account.client('organizations')
        with open('parentids.txt',
                  'r') as parents:  # OU file to get accounts from
            for parent_id in parents:
                parent_id = parent_id.strip("\n")

                # paginate with NextToken until AWS stops returning one,
                # instead of hardcoding a fixed number of 20-account requests
                # (the hardcoded chain also raised KeyError when a response
                # had no NextToken)
                kwargs = {'ParentId': parent_id}
                while True:
                    response = client.list_accounts_for_parent(**kwargs)
                    accounts = jplus.loads(jplus.dumps(response['Accounts']))
                    with open('accounts_from_parent.txt', mode='a') as file:
                        # appends id, name, email of each account to the file
                        writer = csv.writer(file, delimiter=',')
                        for acc in accounts:
                            writer.writerow(
                                [acc['Id'], acc['Name'], acc['Email']])
                    if 'NextToken' not in response:
                        break
                    kwargs['NextToken'] = response['NextToken']
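boto3 also ships a built-in paginator for this call, which removes the manual NextToken loop entirely. A minimal sketch, reusing the profile, region, and file names from the example above:

import csv

import boto3

session = boto3.session.Session(profile_name='default',
                                region_name='eu-west-1')
client = session.client('organizations')

with open('parentids.txt') as parents, \
        open('accounts_from_parent.txt', 'w', newline='') as out:
    writer = csv.writer(out, delimiter=',')
    paginator = client.get_paginator('list_accounts_for_parent')
    for parent_id in parents:
        # the paginator follows NextToken across pages automatically
        for page in paginator.paginate(ParentId=parent_id.strip()):
            for acc in page['Accounts']:
                writer.writerow([acc['Id'], acc['Name'], acc['Email']])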
Example #11
    def test_encoder_predicate_over_classname(self):
        class mycls(object):
            uniq = 313

        @jsonplus.encoder('mycls')
        def mycls_encoder_1(obj):
            return None

        @jsonplus.encoder('mycls', lambda obj: hasattr(obj, 'uniq'))
        def mycls_encoder_2(obj):
            return obj.uniq

        # classname-based encoder is tested after the predicate-based one
        a = mycls()
        b = json.loads(jsonplus.dumps(a, exact=True))

        self.assertEqual(b['__class__'], type(a).__name__)
        self.assertEqual(b['__value__'], a.uniq)
Example #12
    def test_encoder_priority(self):
        class mycls(object):
            pass

        @jsonplus.encoder('mycls', lambda obj: isinstance(obj, mycls))
        def _enc1(obj):
            return 'invalid-priority-1000'

        @jsonplus.encoder('mycls',
                          lambda obj: isinstance(obj, mycls),
                          priority=500)
        def _enc2(obj):
            return 'valid'

        r = json.loads(jsonplus.dumps(mycls(), exact=True))

        self.assertEqual(r['__class__'], 'mycls')
        self.assertEqual(r['__value__'], 'valid')
Example #13
def dump_for_ts_b(infile):
    dpl = DumpPresentationLayer(infile)
    dpl.normalise_datetimes()
    dpl.groupby_date()
    dpl.make_column_rolling(column="worry")
    with open("../data/pl-ts-a.json", "w") as outfile:
        data = json.dumps({
            "dates": dpl.emotions_values_by_date["date"].tolist(),
            "data_series": [{
                "name": "worry",
                "y": dpl.emotions_values_by_date["worry"].apply(
                    lambda v: v / 100).tolist(),
            }],
        })
        outfile.write(data)
Example #14
    def test_encoder_predicates(self):
        class mycls1(object):
            snowflake = 313

        class mycls2(object):
            bulldozer = 131

        @jsonplus.encoder('mycls1', lambda obj: hasattr(obj, 'snowflake'))
        def mycls1_encoder(obj):
            return obj.snowflake

        @jsonplus.encoder('mycls2', lambda obj: hasattr(obj, 'bulldozer'))
        def mycls2_encoder(obj):
            return obj.bulldozer

        a = [mycls2(), mycls1()]
        b = json.loads(jsonplus.dumps(a, exact=True))

        self.assertEqual(b[0]['__value__'], a[0].bulldozer)
        self.assertEqual(b[1]['__value__'], a[1].snowflake)
Example #15
def doc_generator(df):
    for batchRecord in batch(df, 499):
        df_iter = batchRecord.iterrows()
        recordset = []
        for index, document in df_iter:
            recordset.append({
                'Data': json.dumps({
                    'gender': document['gender'],
                    'emp_no': document['emp_no'],
                    'birth_date': document['birth_date'],
                    'last_name': document['last_name'],
                    'hire_date': document['hire_date'],
                    'first_name': document['first_name']
                }).encode('utf-8')  # bytes(str) without an encoding raises TypeError
            })
        # send each batch in a single request instead of one record at a time
        response = client.put_record_batch(
            DeliveryStreamName=
            'glueKinesisEsDemo-ElasticSearchDeliveryStream-KDNUH7D8KJZG',
            Records=recordset)
        print(response)
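put_record_batch does not raise on per-record failures; the response carries a FailedPutCount instead. A minimal sketch of checking it (the single-retry policy here is an illustration, not part of the original):

def put_with_check(client, stream_name, recordset):
    # Firehose reports partial failures via FailedPutCount rather than
    # an exception, so inspect the response explicitly
    response = client.put_record_batch(DeliveryStreamName=stream_name,
                                       Records=recordset)
    failed = response['FailedPutCount']
    if failed:
        # RequestResponses is ordered like Records; failed entries
        # carry an ErrorCode instead of a RecordId
        retries = [
            rec for rec, res in zip(recordset, response['RequestResponses'])
            if 'ErrorCode' in res
        ]
        print(f'{failed} records failed; retrying {len(retries)}')
        client.put_record_batch(DeliveryStreamName=stream_name,
                                Records=retries)
    return response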
Example #16
 def test_basic_dumps(self):
     self.assertEqual(json.dumps(self.basic, sort_keys=True), self.basic_dumps)
Example #17
 def dump_and_load(self, val, **kwargs):
     return json.loads(json.dumps(val, **kwargs))
Example #18
def _main():
    logger = logging.getLogger()
    arguments = docopt(__doc__, version=__version__)
    json.prefer_compat()
    if arguments["--debug"]:
        logger.setLevel(logging.DEBUG)
    logging.debug(arguments)
    api = WhoisXMLAPI()
    results = ""
    search_type = "current"
    mode = "preview"
    if arguments["--historic"]:
        search_type = "historic"
    if arguments["--purchase"]:
        mode = "purchase"
    if arguments["bulk"]:
        with open(arguments["<input_file>"]) as input_file:
            domains = list(
                map(lambda line: line.rstrip(), input_file.readlines()))
            results = api.bulk_whois(domains)
            if arguments["--csv"]:
                results = results["csv"]
            else:
                results = results["structured"]
                results = dict(results=results)
    elif arguments["reverse"]:
        results = api.reverse_whois(arguments["<term>"],
                                    exclude_terms=arguments["<exclude_term>"],
                                    search_type=search_type,
                                    mode=mode)
        if arguments["--purchase"]:
            results = results["domainsList"]

    elif arguments["history"]:
        results = api.whois_history(arguments["<domain>"],
                                    since_date=arguments["--since"],
                                    mode=mode)
        if arguments["--purchase"]:
            results = dict(results=results)

    elif arguments["brand"]:
        results = api.brand_alert(arguments["<term>"],
                                  exclude_terms=arguments["<exclude_term>"],
                                  since_date=arguments["--since"],
                                  mode=mode)

        if arguments["--purchase"]:
            if arguments["--csv"]:
                results = _to_csv(results, "alert")
            else:
                results = dict(results=results)
    elif arguments["registrant"]:
        results = api.registrant_alert(
            arguments["<term>"],
            exclude_terms=arguments["<exclude_term>"],
            since_date=arguments["--since"],
            mode=mode)
        if arguments["--purchase"]:
            if arguments["--csv"]:
                results = _to_csv(results, "alert")
            else:
                results = dict(results=results)
    elif arguments["reverse-ip"]:
        results = api.reverse_ip(arguments["<ip>"])
        if arguments["--csv"]:
            results = _to_csv(results, "reverse")
        else:
            results = dict(results=results)
    elif arguments["reverse-mx"]:
        results = api.reverse_mx(arguments["<mx>"])
        if arguments["--csv"]:
            results = _to_csv(results, "reverse")
        else:
            results = dict(results=results)
    elif arguments["reverse-ns"]:
        results = api.reverse_ns(arguments["<ns>"])
        if arguments["--csv"]:
            results = _to_csv(results, "reverse")
        else:
            results = dict(results=results)
    elif arguments["balances"]:
        results = api.get_account_balances()
    else:
        # The default action is a WHOIS lookup
        thin_whois = True
        if arguments["--verbose"]:
            thin_whois = False
        results = api.whois(arguments["<domain>"], thin_whois=thin_whois)

    # Format output
    if type(results) is dict:
        results = json.dumps(results, indent=2, ensure_ascii=False)
    elif type(results) is list:
        results = "\n".join(results)
    if arguments["--output"]:
        filename = arguments["--output"]
        if type(arguments["--output"]) == list:
            filename = arguments["--output"][0]
        with open(filename,
                  "w",
                  encoding="utf-8",
                  errors="ignore",
                  newline="\n") as output_file:
            output_file.write(results)
    else:
        print(results)
Example #19
 def _put_encoded(self, key: str, value: object):
     encoded_value = jsonplus.dumps(value).encode('utf-8')
     super().put(key, encoded_value)
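A hypothetical read-side counterpart, assuming the base class also exposes a get() that returns the stored bytes (not shown in the original):

 def _get_decoded(self, key: str) -> object:
     # hypothetical: assumes super().get() returns the bytes
     # written by super().put() in _put_encoded() above
     encoded_value = super().get(key)
     return jsonplus.loads(encoded_value.decode('utf-8'))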
Example #20
    def _after_execute(self, conn, clause, multiparams, params, result):
        logger = self.loggers.get(current_thread().ident)
        if not logger:
            return

        try:
            start_time = clause.start_time
        except AttributeError:
            start_time = self.tmp.pop(id(clause))
        stop_time = time()
        duration = (stop_time - start_time) * 1000
        config = dt_settings.get_config()

        try:
            raw_compiled = clause.compile(dialect=self.engine.dialect,
                                          compile_kwargs={})

        except AttributeError:
            try:
                parameters = _distill_params(multiparams, params)
            except InvalidRequestError:
                parameters = []
            raw_sql = " ".join(six.text_type(clause).splitlines())

        else:
            try:
                ctx = CursorlessExecutionContext._init_compiled(
                    self.engine.dialect,
                    conn,
                    conn._Connection__connection,
                    raw_compiled,
                    _distill_params(multiparams, params),
                )
            except Exception:
                parameters = []
                raw_sql = " ".join(six.text_type(clause).splitlines())
            else:
                parameters = ctx.parameters
                raw_sql = " ".join(ctx.statement.splitlines())

        try:
            sql = " ".join(
                six.text_type(
                    clause.compile(
                        dialect=self.engine.dialect,
                        compile_kwargs={"literal_binds": True})).splitlines())
        except (CompileError, TypeError, NotImplementedError, AttributeError):
            # not all queries support literal_binds
            sql = raw_sql

        if config["ENABLE_STACKTRACES"]:
            stacktrace = tidy_stacktrace(reversed(get_stack()))
        else:
            stacktrace = []

        template_info = get_template_info()

        params = {
            "vendor": conn.dialect.name,
            "alias": self.alias,
            "sql": sql,
            "duration": duration,
            "raw_sql": raw_sql,
            "params": json.dumps([
                list(i) if isinstance(i, (list, tuple)) else i
                for i in parameters if i
            ]),
            'raw_params': tuple(
                tuple(i.items() if isinstance(i, dict) else i)
                for i in parameters),
            "stacktrace": stacktrace,
            "start_time": start_time,
            "stop_time": stop_time,
            "is_slow": duration > config["SQL_WARNING_THRESHOLD"],
            "is_select": sql.lower().strip().startswith("select"),
            "template_info": template_info,
        }

        logger.record(**params)

        return params
Example #21
import datetime
import json
import os


class St:
    # minimal class used by the object_hook round-trip below
    def __init__(self, name, score):
        self.name = name
        self.score = score


std = St('liuyinghui', 100)

with open("d:/hello.txt", 'w') as f:  # create the file so the reads below succeed
    f.write("hello")

with open("d:/hello.txt", 'r') as f:
    string3 = f.read()
    print(string3)

with open("d:/hello.txt", 'a') as f:
    flag = f.writable()
    string3 = f.write("\n刘颖慧,你好!!")
    print(string3)
    print(flag)

#os.mkdir('d:/liuyinghui')
#os.rmdir('d:/liuyinghui')
print(os.name, os.environ['OS'])
jsonDict = dict(name='liuyinghui', age=35, datetime=datetime.datetime.today())
# datetime objects are not JSON-serializable by default; fall back to str()
jsonstr = json.dumps(jsonDict, default=str)
print(jsonstr)
print(jsonDict)
jsonDict2 = json.loads(jsonstr)
print(jsonDict2)
print(json.dumps(std.__dict__))
print(type(std))


def jsonHandle(d):
    return St(d['name'], d['score'])


std1 = json.loads(json.dumps(std.__dict__), object_hook=jsonHandle)
print(std1)
print(type(std1))
Example #22
def send(writer, data):
    serialized_data = jsonplus.dumps(data).encode()
    writer.write(struct.pack('>I', len(serialized_data)))
    writer.write(serialized_data)
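A minimal sketch of the read side of the same length-prefixed framing, assuming writer and reader are an asyncio StreamWriter/StreamReader pair:

import struct

import jsonplus


async def recv(reader):
    # read the 4-byte big-endian length prefix, then exactly that many bytes
    header = await reader.readexactly(4)
    (length,) = struct.unpack('>I', header)
    payload = await reader.readexactly(length)
    return jsonplus.loads(payload.decode())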
Example #23
 def dumps(*pa, **kw):
     return jsonplus.dumps(*pa, **kw)
Example #24
    for record in hash_index:
        if property['hash'] in hash_index[record]:
            config_list = re.findall(r'[0-9]{1,3}\.config', record)
            if len(config_list) > 1:
                raise SystemError(
                    "More than one configuration detected in path {}".format(
                        record))

            property['configurations'].add(config_list[0])
            count += 1
    property['configurations'] = list(property['configurations'])
    property['configurations'].sort()
    property['num_occurrences'] = len(property['configurations'])
    logging.info('Report ' + property['hash'] + ' was present in ' +
                 str(len(property['configurations'])) + ' configurations.')

    # Also add the description list to the property
    property['matching_description'] = description_index[property['hash']]

    # Add empty investigation results field
    property['investigation'] = {'result': None, 'comments': ''}

print(str(len(master)) + ' unique bugs found.')

# Write output to file
with open(args.output, 'w') as out:
    content = json.dumps(master)  # dumps() takes no file argument; write the string ourselves
    out.write(content)
    print('Output written to ' + args.output)
Example #25
 def get_prep_value(self, value):
     if value is None:
         return value
     return jsonplus.dumps(value)
Example #26
 def test_basic_loads_dumps(self):
     basic = json.loads(self.basic_dumps)
     self.assertEqual(json.dumps(basic, sort_keys=True), self.basic_dumps)
Example #27
 def test_plus_dumps(self):
     self.assertEqual(json.dumps(self.plus, sort_keys=True), self.plus_dumps)
Example #28
 def save(self):
     self._prepare_storage()
     with open('%s/data.json' % self.path, 'w') as f:
         f.write(json.dumps(self.data))
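A hypothetical load() counterpart under the same path layout (not part of the original):

 def load(self):
     # hypothetical: restore self.data written by save()
     with open('%s/data.json' % self.path) as f:
         self.data = json.loads(f.read())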
Example #29
 def test_inf_representation(self):
     a = json.dumps(float("inf"))
     b = 'null'
     self.assertEqual(b, a)