Example #1
def data_bubble(request, id=None):
    dashboard = get_object_or_404(Dashboard, id=id)
    dbCon = ConnectedDatabase.objects.get(project=dashboard.project)
    workboard = get_object_or_404(Workboard, id=request.POST.get('workboard', None))
    variables = json.loads(request.POST.get('variables', None))
    engine = create_engine(dbCon.connection_string, echo=True)
    cnx = engine.raw_connection()
    collection_name = list(variables.keys())[0]

    column_names = []
    yaxis = None
    for item in variables[collection_name]:
        column_names.append(item['name'])
        if item['type'] == 'string':
            xaxis = item['name']
        elif yaxis is None and item['type'] == 'integer':
            yaxis = item['name']
        elif item['type'] == 'integer':
            zaxis = item['name']

    finalResult = []

    try:
        query = ('SELECT "{0}"."{1}" AS x, "{0}"."{2}" AS y, '
                 '"{0}"."{3}" AS size FROM "{0}"').format(
                     collection_name, xaxis, yaxis, zaxis)
        data = pd.read_sql(query, cnx)

        datasum = data.groupby(['x']).sum()
        datasum['index_col'] = range(0, len(datasum))
        datamin = data.groupby(['x']).min()
        datamin['index_col'] = range(0, len(datamin))
        datamax = data.groupby(['x']).max()
        datamax['index_col'] = range(0, len(datamax))
        dataavg = data.groupby(['x']).mean()
        dataavg['index_col'] = range(0, len(dataavg))

        resultsumdump = json.JSONDecoder().decode(datasum.reset_index().to_json(orient="records"))
        resultmindump = json.JSONDecoder().decode(datamin.reset_index().to_json(orient="records"))
        resultmaxdump = json.JSONDecoder().decode(datamax.reset_index().to_json(orient="records"))
        resultavgdump = json.JSONDecoder().decode(dataavg.reset_index().to_json(orient="records"))

        finalResult.append(
            {'sum': resultsumdump, 'avg': resultavgdump, 'min': resultmindump, 'max': resultmaxdump, 'var_name': xaxis})

        json_data = {
            'data': json.dumps(finalResult),
            'analysis_type': request.POST.get('type', None),
            'columns': column_names,
            'workboard': workboard,
            'dashboard': dashboard,
            'yaxis': yaxis,
            'zaxis': zaxis
        }

    except Exception as e:
        json_data = {
            'analysis_type': request.POST.get('type', None),
            'error': e.args[0],
            'dashboard': dashboard,
            'workboard': workboard
        }

    template = render_to_string('dashboards/dashboard_workboard.html', json_data)
    return HttpResponse(json.dumps(template), content_type='application/json')
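A note on the decoding pattern above (an editorial aside, not from the original source): with no constructor arguments, json.JSONDecoder().decode(s) returns exactly what json.loads(s) does, so loads is the usual shorthand for one-off parsing.

import json

payload = '{"x": "district", "y": 42}'

# A default JSONDecoder behaves identically to json.loads with no arguments.
assert json.JSONDecoder().decode(payload) == json.loads(payload)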
Example #2
    def find_results(self):
        resultlist = []

        if self.type == 'book':
            set_url = self.url + urllib.quote('intitle:' + '"' + self.name +
                                              '"')
        else:
            set_url = self.url + urllib.quote('inauthor:' + '"' + self.name +
                                              '"')

        logger.info('Searching url: ' + set_url)

        try:
            startindex = 0
            resultcount = 0
            ignored = 0
            while True:

                self.params['startIndex'] = startindex
                URL = set_url + '&' + urllib.urlencode(self.params)

                try:
                    jsonresults = json.JSONDecoder().decode(
                        urllib2.urlopen(URL, timeout=30).read())
                except HTTPError, err:
                    logger.error(
                        'Google API returned HTTP Error - probably time/rate limiting - [%s]'
                        % err.msg)
                    break  # stop paging; jsonresults may be unset or stale here

                startindex = startindex + 40

                for item in jsonresults['items']:

                    # skip if no author, no author is no book.
                    try:
                        authorname = item['volumeInfo']['authors'][0]
                    except KeyError:
                        logger.debug('Skipped a result without authorfield.')
                        continue

                    try:
                        #skip if language is in ignore list
                        booklang = item['volumeInfo']['language']
                        if booklang not in lazylibrarian.IMP_PREFLANG:
                            ignored = ignored + 1
                            continue
                    except KeyError:
                        ignored = ignored + 1
                        logger.debug(
                            'Skipped a result where no language is found')
                        continue

                    try:
                        bookpub = item['volumeInfo']['publisher']
                    except KeyError:
                        bookpub = None

                    try:
                        booksub = item['volumeInfo']['subtitle']
                    except KeyError:
                        booksub = None

                    try:
                        bookdate = item['volumeInfo']['publishedDate']
                    except KeyError:
                        bookdate = '0000/00/00'

                    try:
                        bookimg = item['volumeInfo']['imageLinks']['thumbnail']
                    except KeyError:
                        bookimg = 'images/nocover.png'

                    try:
                        bookrate = item['volumeInfo']['averageRating']
                    except KeyError:
                        bookrate = 0

                    try:
                        bookpages = item['volumeInfo']['pageCount']
                    except KeyError:
                        bookpages = '0'

                    try:
                        bookgenre = item['volumeInfo']['categories'][0]
                    except KeyError:
                        bookgenre = None

                    try:
                        bookdesc = item['volumeInfo']['description']
                    except KeyError:
                        bookdesc = 'Not available'

                    try:
                        if item['volumeInfo']['industryIdentifiers'][0][
                                'type'] == 'ISBN_10':
                            bookisbn = item['volumeInfo'][
                                'industryIdentifiers'][0]['identifier']
                        else:
                            bookisbn = 0
                    except KeyError:
                        bookisbn = 0

                    resultlist.append({
                        'authorname': authorname,
                        'bookid': item['id'],
                        'bookname': item['volumeInfo']['title'],
                        'booksub': booksub,
                        'bookisbn': bookisbn,
                        'bookpub': bookpub,
                        'bookdate': bookdate,
                        'booklang': booklang,
                        'booklink': item['volumeInfo']['canonicalVolumeLink'],
                        'bookrate': float(bookrate),
                        'bookimg': bookimg,
                        'bookpages': bookpages,
                        'bookgenre': bookgenre,
                        'bookdesc': bookdesc
                    })

                    resultcount = resultcount + 1

        except KeyError:
            logger.info('Found %s results for %s with name: %s' %
                        (resultcount, self.type, self.name))
            if ignored > 0:
                logger.info(
                    'Skipped %s results because they are not in a preferred language.'
                    % ignored)

        return resultlist
Example #3
def get_asset(uuid):
    url = "{LR_ORIGIN}/c/portal/truenth/asset/detailed?uuid={uuid}".format(
        LR_ORIGIN=current_app.config["LR_ORIGIN"], uuid=uuid)
    data = requests.get(url).content
    return json.JSONDecoder().decode(data)['asset']
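Side note on get_asset: on Python 3, JSONDecoder.decode expects a str, while requests' response.content is bytes. A minimal Python 3-safe sketch (base_url stands in for the current_app.config lookup and is an assumption, not part of the original):

import requests

def get_asset_py3(base_url, uuid):
    url = "{base}/c/portal/truenth/asset/detailed?uuid={uuid}".format(
        base=base_url, uuid=uuid)
    # response.json() decodes the bytes and parses the JSON in one step.
    return requests.get(url).json()['asset']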
Example #4
#!/usr/bin/env python
# encoding: utf-8
from __future__ import print_function
import json
import sys
shard = sys.argv[1]

with open(shard, "r") as f:
    input_value = json.JSONDecoder().decode(f.read())
    out = []

    if "Error" in input_value:
        with open("errors", "a") as errors:
            print("Data error: user %s %s" %
                  (input_value["User"], input_value["Error"]),
                  file=errors)
    else:
        for doc in input_value["Docs"]:
            out.append({
                "Key": doc,
                "Value": {
                    "User": input_value["User"],
                    "Team": input_value["Team"]
                }
            })

    print(json.JSONEncoder().encode(out))
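As with the decoder, json.JSONEncoder().encode(x) with default arguments produces the same string as json.dumps(x); a quick illustrative check:

import json

record = [{"Key": "doc1", "Value": {"User": "u", "Team": "t"}}]
assert json.JSONEncoder().encode(record) == json.dumps(record)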
Example #5
import scrapy
import json
decoder = json.JSONDecoder()

#> pnp_previews_href.json && scrapy runspider pnp_2.py -o pnp_previews_href.json

with open('pnp_sections.json', 'r') as fp:
    list_input = json.load(fp)

print list_input
print len(list_input)
print list_input[0]['href']

list_urls = []
list_urls.append('https://www.pnp.ru')

for x in list_input:
    print x['href']
    string = x['href']
    list_urls.append(string)

print list_urls


class QuotesSpider(scrapy.Spider):
    name = 'quotes'
    start_urls = list_urls

    def parse(self, response):
        for quote in response.css('div.item div.title'):
            yield {
Example #6
## @author zhouyuefeng([email protected])
## @date 2014/06/19 13:06:33
## @brief
##

import sys
import MySQLdb
import json
import time

cur_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())

with open('conf/default.conf', 'r') as confFile:
    confStr = confFile.read()
conf = json.JSONDecoder().decode(confStr)
dbStaticResult = conf['database']['db_bim_rap_result_db']

conn = MySQLdb.connect(host=dbStaticResult['host'],
                       user=dbStaticResult['user'],
                       passwd=dbStaticResult['password'],
                       db=dbStaticResult['database'],
                       port=dbStaticResult['port'])
cur = conn.cursor()

tagged_query_dict_url = sys.stdin.readline().strip()
tagged_query_dict_detail_url = sys.stdin.readline().strip()

insertion = ('insert into tagged_query_dict'
             '(task_id, dict_file_url, insert_datetime, update_datetime) '
             'values (%s, %s, %s, %s)')
# Parameterized values let the driver do the quoting and avoid SQL injection.
cur.execute(insertion, (sys.argv[1], tagged_query_dict_url, cur_time, cur_time))
conn.commit()  # MySQLdb turns autocommit off by default
Example #7
import asyncio
from collections import OrderedDict
from datetime import datetime
import hashlib
import json
import ldap_login
import queue
import socket
import time
from threading import Thread
from util import prt, uniq
import uuid
import websocket

json_decoder = json.JSONDecoder(object_pairs_hook=OrderedDict)
connected2hoarder = False
company = 'hm'
department = 'cf'
group = 'jmw'
token = '441234567890=='
agents = {}
ws = None
agent_job2queue = {}


def handle_agent_action(sock, agent, action, data):
    if action == 'register':
        agent = data['agent']
        inputs = uniq(data['inputs'])
        cleanses = uniq(data['cleanses'])
Example #8
def read_storage(storage_path):
    with open(storage_path, 'r') as file_read:
        data_read = file_read.read()
        data_read = json.JSONDecoder().decode(data_read)
    return data_read
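An equivalent, slightly shorter form of read_storage (a sketch, not from the original project): json.load performs the read and the default decode in one call.

import json

def read_storage(storage_path):
    with open(storage_path, 'r') as file_read:
        # json.load combines file_read.read() with a default decode.
        return json.load(file_read)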
Example #9
def BuildSTBLChanges () -> bool:
	canBuildSTBL = STBL.CanBuildSTBL()  # type: bool

	if not canBuildSTBL:
		return False

	for package in Mod.GetCurrentMod().Packages:  # type: Mod.Package
		if not os.path.exists(package.STBLPath):
			continue

		for stblXMLFileName in os.listdir(package.STBLPath):  # type: str
			stblXMLFilePath = os.path.join(package.STBLPath, stblXMLFileName)  # type: str

			if os.path.isfile(stblXMLFilePath) and os.path.splitext(stblXMLFileName)[1].casefold() == ".xml":
				manifestFilePath = os.path.splitext(stblXMLFilePath)[0] + "_Manifest.json"  # type: str

				modifiedTime = os.path.getmtime(stblXMLFilePath)  # type: float
				builtModifiedTime = None  # type: typing.Optional[int]
				builtFileNames = list()  # type: typing.List[str]

				try:
					if os.path.exists(manifestFilePath):
						with open(manifestFilePath) as manifestFile:
							manifest = json.JSONDecoder().decode(manifestFile.read())  # type: typing.Dict[str, typing.Any]

						if not isinstance(manifest, dict):
							raise Exceptions.IncorrectTypeException(manifest, "Root", (dict,))

						if ManifestBuiltModifiedTimeKey in manifest:
							builtModifiedTime = manifest[ManifestBuiltModifiedTimeKey]

						if not isinstance(builtModifiedTime, float) and not isinstance(builtModifiedTime, int):
							incorrectValue = builtModifiedTime  # type: typing.Any
							builtModifiedTime = None
							raise Exceptions.IncorrectTypeException(incorrectValue, "Root[%s]" % ManifestBuiltModifiedTimeKey, (float, int))

						if ManifestBuiltFileNamesKey in manifest:
							builtFileNames = manifest[ManifestBuiltFileNamesKey]

						if not isinstance(builtFileNames, list):
							incorrectValue = builtFileNames  # type: typing.Any
							builtFileNames = list()
							raise Exceptions.IncorrectTypeException(incorrectValue, "Root[%s]" % ManifestBuiltFileNamesKey, (list,))

						for builtFileNameIndex in range(len(builtFileNames)):  # type: int
							builtFileName = builtFileNames[builtFileNameIndex]  # type: str

							if not isinstance(builtFileName, str):
								builtFileNames = list()
								raise Exceptions.IncorrectTypeException(builtFileName, "Root[%s][%s]" % (ManifestBuiltFileNamesKey, builtFileNameIndex), (str,))

				except Exception as e:
					print("Failed to read STBL manifest file at '" + manifestFilePath + "'\n" + str(e), file = sys.stderr)

				missingBuiltFile = False  # type: bool

				for builtFileName in builtFileNames:
					builtFilePath = os.path.join(os.path.join(package.SourceLoosePath, "STBL"), builtFileName)  # type: str

					if not os.path.exists(builtFilePath):
						missingBuiltFile = True
						break

				if missingBuiltFile or modifiedTime != builtModifiedTime:
					buildTempDirectory = stblXMLFilePath + "_Temp_Build"  # type: str

					if not os.path.exists(buildTempDirectory):
						os.makedirs(buildTempDirectory)

					try:
						STBL.BuildSTBL(buildTempDirectory, stblXMLFilePath)

						manifest = dict()  # type: typing.Dict[str, typing.Any]

						manifest[ManifestBuiltModifiedTimeKey] = modifiedTime
						builtFileNames = list()

						for builtFileName in os.listdir(buildTempDirectory):
							builtFilePath = os.path.join(buildTempDirectory, builtFileName)

							if os.path.isfile(builtFilePath):
								builtFileNames.append(builtFileName)

						manifest[ManifestBuiltFileNamesKey] = builtFileNames

						with open(manifestFilePath, "w+") as manifestFile:
							manifestFile.write(json.JSONEncoder(indent = "\t").encode(manifest))

						dir_util.copy_tree(buildTempDirectory, os.path.join(package.SourceLoosePath, "STBL"))
					finally:
						shutil.rmtree(buildTempDirectory)

	return True
Example #10
 def parse_json(self):
     self.result = [json.JSONDecoder(object_pairs_hook=OrderedDict).decode(self.page)]
Example #11
class FileOper:
    _lock = None
    fileName = 'RunTimeConfig.dat'
    dirName = 'RunTimeData'
    log = logger.log
    filePos = 0
    jsonDecoder = json.JSONDecoder()
    jsonEncoder = json.JSONEncoder()

    @property
    def lock(self):
        return self._lock

    def __init__(self, fileName='RunTimeConfig.dat', forWrite=True):
        if not os.path.exists(self.dirName):
            os.makedirs(self.dirName)
        self._lock = threading.Lock()
        self.fileName = self.dirName + '/' + fileName
        self.forWrite = forWrite

    def writeObj(self, obj):
        with self.lock:
            try:
                with open(self.fileName, 'a+') as fh:
                    # pickle.dump(obj, fh)
                    objStr = self.jsonEncoder.encode(obj)
                    fh.write(objStr + '\n')
            except:
                self.log._log.error('Failed to write file! %s', sys.exc_info()[1])

    def readObj(self):
        with self.lock:
            try:
                if not os.path.exists(self.fileName):
                    return None
                if self.filePos == os.path.getsize(self.fileName):
                    return None
                with open(self.fileName, 'r+') as fh:
                    fh.seek(self.filePos)
                    # obj = pickle.load(fh)
                    obj = self.jsonDecoder.decode(fh.readline())
                    self.filePos = fh.tell()
                return obj
            except:
                print(sys.exc_info()[1])
                self.log._log.error('Failed to read file! %s', sys.exc_info()[1])
            return None

    def clean(self):
        with self.lock:
            try:
                if not os.path.exists(self.fileName):
                    return
                with open(self.fileName, 'wb+') as fh:
                    fh.truncate()
            except:
                self.log._log.error('Failed to clear file! %s', sys.exc_info()[1])

    def reachEnd(self):
        with self.lock:
            try:
                return self.filePos == os.path.getsize(self.fileName)
            except:
                self.log._log.error('Failed to get file status! %s', sys.exc_info()[1])
            return True
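FileOper caches one JSONDecoder and one JSONEncoder as class attributes. Both hold no per-call state, so a single shared instance is safe to reuse and avoids rebuilding one for every line; a minimal illustration (not from the original class):

import json

decoder = json.JSONDecoder()
encoder = json.JSONEncoder()

line = encoder.encode({"status": "ok"})
assert decoder.decode(line) == {"status": "ok"}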
Example #12
#      condition in libghfu

server_log_file_path = os.path.join(path, "Server", "log")
server_log_file = open(server_log_file_path, "a")


def server_log(_log):
    server_log_file.write("\n{}: {}".format(time.asctime(), _log))


app = Flask(__name__)

known_clients = "127.0.0.1"

jencode = json.JSONEncoder().encode
jdecode = json.JSONDecoder().decode

#mutex = threading.Lock()

LAST_PERFORMED_MONTHLY_OPERATIONS = [0, 0, 0]

TRANSACTION_CHECK_DELAY = .5  # delay for checking if pending transaction has been effected
JPESA_DEPOSIT_CHARGES = .03  # also applies when transferring funds from jpesa to jpesa
JPESA_WITHDRAW_CHARGES = 500.0

CODES = {
}  # every time a random code is generated, it's stored here to hold transaction data
# so when the client wants to know about the state of a transaction, I don't query the
# jpesa api but rather consult this dictionary and return whatever the latest status
# of the transaction is. the moment a code is no longer needed, it's deleted, otherwise
# the server would end up consuming GBs of memory holding un-needed data!
Example #13
def read_json_content(filename):
    with open(filename, 'r') as f:
        return json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(f.read(os.stat(filename).st_size))
Example #14
    def __init__(self, socket_path, executor=None, logger=logging, encoder_cls=json.JSONEncoder, decoder=json.JSONDecoder()):
        self.socket_path = socket_path
        self.encoder_cls = encoder_cls
        self.decoder = decoder
        self.executor = executor
        self.logger = logger

        # Do we require the compatibility mode?
        self._compat = True
        self.next_id = 0
Example #15
    def get(self, request, format=None):

        variables = json.loads(request.GET.get('variables', None))
        for item in variables:
            print item['variable_name']
            if item['axis'] == 'x':
                xaxis = item['variable_name']
            if item['axis'] == 'y':
                yaxis = item['variable_name']

        #collection name check
        if variables[0]['collection_name'] == variables[1]['collection_name']:
            collectionname = variables[0]['collection_name']


        engine = create_engine('mysql+pymysql://root:@localhost/test?charset=utf8', echo=True)
        conn = engine.connect(close_with_result=True)

        cnx = engine.raw_connection()
        xx = pd.read_sql('Select '+ xaxis +' as label, '+ yaxis +' as value FROM '+ collectionname, cnx)



        xxsum = xx.groupby(['label']).sum()
        xxsum['index_col'] = range(1, len(xxsum) + 1)
        xxmin = xx.groupby(['label']).min()
        xxmin['index_col'] = range(1, len(xxmin) + 1)
        xxmax = xx.groupby(['label']).max()
        xxmax['index_col'] = range(1, len(xxmax) + 1)
        xxavg = xx.groupby(['label']).mean()
        xxavg['index_col'] = range(1, len(xxavg) + 1)

        resultsumdump = json.JSONDecoder().decode(xxsum.reset_index().to_json(orient="records"))
        resultmindump = json.JSONDecoder().decode(xxmin.reset_index().to_json(orient="records"))
        resultmaxdump = json.JSONDecoder().decode(xxmax.reset_index().to_json(orient="records"))
        resultavgdump = json.JSONDecoder().decode(xxavg.reset_index().to_json(orient="records"))
        # resultsum = conn.execute('Select district as label, SUM(salary_basic) as value FROM teacher_final_data group by district;')
        # resultAvg = conn.execute('SELECT district as label, AVG(salary_basic) as value FROM teacher_final_data group by district;')
        # resultMin = conn.execute('SELECT district as label, MIN(salary_basic) as value FROM teacher_final_data group by district;')
        # resultMax = conn.execute('SELECT district as label, SUM(salary_basic) as value FROM teacher_final_data group by district;')
        #
        # resultsumdump = json.JSONDecoder().decode(json.dumps([dict(r) for r in resultsum], cls=DjangoJSONEncoder))
        # resultavgdump = json.JSONDecoder().decode(json.dumps([dict(r) for r in resultAvg], cls=DjangoJSONEncoder))
        # resultmindump = json.JSONDecoder().decode(json.dumps([dict(r) for r in resultMin], cls=DjangoJSONEncoder))
        # resultmaxdump = json.JSONDecoder().decode(json.dumps([dict(r) for r in resultMax], cls=DjangoJSONEncoder))

        finalRes = []
        finalRes.append({"sum": resultsumdump, "avg": resultavgdump, "min": resultmindump, "max": resultmaxdump})

        # agg = request.GET.get('agg', None)
        # if agg == "sum":
        #     # queryset = ChartData.objects.values('label').annotate(value=Sum('value'))
        #     result = conn.execute('Select district as label, SUM(salary_basic) as value FROM teacher_final_data group by district;')
        # elif agg == "avg":
        #     # queryset = ChartData.objects.values('label').annotate(value=Avg('value'))
        #     result = conn.execute('SELECT id, district as label, AVG(salary_basic) as value FROM teacher_final_data group by district;')
        # elif agg == "min":
        #     # queryset = ChartData.objects.values('label').annotate(value=Min('value'))
        #     result = conn.execute(
        #         'SELECT id, district as label, MIN(salary_basic) as value FROM teacher_final_data group by district;')
        # elif agg == "max":
        #     # queryset = ChartData.objects.values('label').annotate(value=Max('value'))
        #     result = conn.execute(
        #         'SELECT id, district as label, MAX(salary_basic) as value FROM teacher_final_data group by district;')
        # else:
        #     result = conn.execute(
        #         'SELECT  id, district as label, SUM(salary_basic) as value FROM teacher_final_data group by district;')
        # # result = conn.execute(
        # #     'SELECT district as label, SUM(salary_basic) as value FROM teacher_final_data group by district;')
        # a = json.dumps([dict(r) for r in result], cls=DjangoJSONEncoder)
        # b = json.JSONDecoder().decode(a)
        response = Response(finalRes, status=status.HTTP_200_OK)
        return response
Example #16
    def _read_only_self(self):
        """
        Parse request file and return a tuple of pvs, metadata and includes.

        In case of problems returns (but does not raise) exceptions.
                OSError
                ReqParseError
                    ReqFileFormatError
                    ReqFileInfLoopError

        :return: A tuple (pv_list, metadata, includes_list)
        """

        pvs = list()
        includes = list()

        try:
            with open(self._path) as f:
                file_data = f.read()
        except OSError as e:
            return e

        if file_data.lstrip().startswith('{'):
            try:
                md = file_data.lstrip()
                metadata, end_of_metadata = \
                    json.JSONDecoder().raw_decode(md)
            except json.JSONDecodeError:
                msg = f"{self._path}: Could not parse JSON metadata header."
                return ReqParseError(msg)

            # Ensure line counts make sense for error reporting.
            actual_data = md[end_of_metadata:].lstrip()
            actual_data_index = file_data.find(actual_data)
            self._curr_line_n = len(file_data[:actual_data_index].splitlines())
            file_data = file_data[actual_data_index:]
        else:
            metadata = {}
            self._curr_line_n = 0

        for self._curr_line in file_data.splitlines():
            self._curr_line_n += 1
            self._curr_line = self._curr_line.strip()

            # skip comments, empty lines and "data{}" stuff
            if not self._curr_line.startswith(('#', "data{", "}", "!")) \
               and self._curr_line.strip():
                # First replace macros, then check if any unreplaced macros
                # which are not "global"
                pvname = SnapshotPv.macros_substitution(
                    (self._curr_line.rstrip().split(',', maxsplit=1)[0]),
                    self._macros)

                try:
                    # Check if any unreplaced macros
                    self._validate_macros_in_txt(pvname)
                except MacroError as e:
                    return ReqParseError(
                        self._format_err((self._curr_line_n, self._curr_line),
                                         e))

                pvs.append(pvname)

            elif self._curr_line.startswith('!'):
                # Calling another req file
                split_line = self._curr_line[1:].split(',', maxsplit=1)

                if len(split_line) > 1:
                    macro_txt = split_line[1].strip()
                    if not macro_txt.startswith(('\"', '\'')):
                        return ReqFileFormatError(
                            self._format_err(
                                (self._curr_line_n, self._curr_line),
                                'Syntax error. Macro argument must be quoted'))
                    else:
                        quote_type = macro_txt[0]

                    if not macro_txt.endswith(quote_type):
                        return ReqFileFormatError(
                            self._format_err(
                                (self._curr_line_n, self._curr_line),
                                'Syntax error. Macro argument must be quoted'))

                    macro_txt = SnapshotPv.macros_substitution(
                        macro_txt[1:-1], self._macros)
                    try:
                        # Check for any unreplaced macros
                        self._validate_macros_in_txt(macro_txt)
                        macros = parse_macros(macro_txt)

                    except MacroError as e:
                        return ReqParseError(
                            self._format_err(
                                (self._curr_line_n, self._curr_line), e))

                else:
                    macros = dict()

                path = os.path.join(os.path.dirname(self._path), split_line[0])
                msg = self._check_looping(path)
                if msg:
                    return ReqFileInfLoopError(
                        self._format_err((self._curr_line_n, self._curr_line),
                                         msg))

                try:
                    sub_f = SnapshotReqFile(path, parent=self, macros=macros)
                    includes.append(sub_f)

                except OSError as e:
                    return OSError(
                        self._format_err((self._curr_line, self._curr_line_n),
                                         e))

        return (pvs, metadata, includes)
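The key detail in the metadata handling above is raw_decode: unlike decode, it parses one JSON value from the start of the string and also returns the index just past it, so the non-JSON remainder can be handled separately. A standalone sketch:

import json

text = '{"meta": 1}\nPV:ONE\nPV:TWO'

metadata, end = json.JSONDecoder().raw_decode(text)
# metadata == {"meta": 1}; text[end:] holds the lines after the JSON header.
assert text[end:].lstrip().startswith('PV:ONE')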
Example #17
Sina Weibo: http://weibo.com/ziyuetk
'''
import urllib2, json
from city import city

yourcity = raw_input("Which city's weather do you want to check?")

# Test whether the yourcity variable returns the value you expect
#print yourcity

url = "http://www.weather.com.cn/data/cityinfo/" + city[yourcity] + ".html"

#print url  # same as above

response = urllib2.urlopen(url, timeout=10)
city_dict = response.read()

# What gets printed here is JSON-formatted data
#print city_dict

# Use Python's json library to parse the fetched JSON content
jsondata = json.JSONDecoder().decode(city_dict)

# Define a few variables to store the parsed values
temp_low = jsondata['weatherinfo']['temp1']
temp_high = jsondata['weatherinfo']['temp2']
weather = jsondata['weatherinfo']['weather']

print yourcity
print weather
print temp_low + "~" + temp_high
Example #18
#!/usr/bin/env python
#coding:utf-8

#########################################################################################################################
## Version : 0.0.7-1
## Developer : Yannyann (https://github.com/a2d8a4v)
## Website : https://www.yannyann.com
## License : MIT License
#########################################################################################################################

import sys, urllib2, socket, json
try:
    socket.setdefaulttimeout(5)
    if len(sys.argv) == 1:
        apiurl = "http://ip-api.com/json"
    elif len(sys.argv) == 2:
        apiurl = "http://ip-api.com/json/%s" % sys.argv[1]
    content = urllib2.urlopen(apiurl).read()
    content = json.JSONDecoder().decode(content)
    if content['status'] == 'success':
        print(content['isp'])
    else:
        print("NO")
except:
    print("Usage:%s IP" % sys.argv[0])
Example #19
 def _load_layers(self):
     model_path = self._get_model_path()
     decoder = json.JSONDecoder()
     with open(model_path, 'r') as hand:
         data = hand.read().strip()
     return decoder.decode(data)
Example #20
    def update_app(self):
        contract = jc.Contract()

        spec = {}
        for name, value in self.initial_app_spec.items():
            if name == 'name':
                spec[name] = value
            elif name == 'cloudProviders':
                spec[name] = value + ',kubernetes'
            elif name in ['updateTs', 'createTs']:
                spec[name] = str(int(value) + 1)
            elif isinstance(value, basestring):
                spec[name] = 'NEW_' + value
        payload = self.agent.make_json_payload_from_object(spec)
        expectUpdate = dict(spec)

        # The actual update is determined by front50.
        # The createTs we gave is ignored.
        # As before, the name is upper-cased.
        del expectUpdate['updateTs']
        expectUpdate['createTs'] = self.initial_app_spec['createTs']
        expectUpdate['name'] = self.initial_app_spec['name'].upper()
        self.app_history.insert(0, expectUpdate)

        # TODO(ewiseblatt) 20160524:
        # Add clauses that observe Front50 to verify the history method works
        # and that the get method is the current version.
        num_versions = 2 if self.versioning_enabled else 1
        gcs_builder = gcp.GcpStorageContractBuilder(self.gcs_observer)
        (gcs_builder.new_clause_builder(
            'Google Cloud Storage Contains File',
            retryable_for_secs=5).list_bucket(
                self.BUCKET,
                '/'.join([self.BASE_PATH, 'applications', self.TEST_APP]),
                with_versions=True).contains_path_value('name',
                                                        self.TEST_APP,
                                                        min=num_versions,
                                                        max=num_versions))
        (gcs_builder.new_clause_builder(
            'Updated File Content', retryable_for_secs=5).retrieve_content(
                self.BUCKET,
                '/'.join([
                    self.BASE_PATH, 'applications', self.TEST_APP,
                    'specification.json'
                ]),
                transform=json.JSONDecoder().decode).contains_match({
                    key: jp.EQUIVALENT(value)
                    for key, value in expectUpdate.items()
                }))

        for clause in gcs_builder.build().clauses:
            contract.add_clause(clause)

        f50_builder = st.http_observer.HttpContractBuilder(self.agent)
        (f50_builder.new_clause_builder(
            'History Records Changes').get_url_path(
                '/v2/applications/{app}/history'.format(
                    app=self.TEST_APP)).contains_path_match(
                        '[0]', {
                            key: jp.EQUIVALENT(value)
                            for key, value in self.app_history[0].items()
                        }).contains_path_match(
                            '[1]', {
                                key: jp.EQUIVALENT(value)
                                for key, value in self.app_history[1].items()
                            }))

        for clause in f50_builder.build().clauses:
            contract.add_clause(clause)

        # TODO(ewiseblatt): 20160524
        # Add a mechanism here to check the previous version
        # so that we can verify version recovery as well.
        path = '/'.join(['/v2/applications', self.TEST_APP])
        return st.OperationContract(self.new_patch_operation(
            title='update_app', data=payload, path=path),
                                    contract=contract)
Example #21
    def __init__(self, **kwargs):
        super().__init__()

        import json
        self.decoder = json.JSONDecoder(**self.get_opts(kwargs))
Example #22
def main():
    args = parse_args()

    # Initialize
    dbmng = DBManager(args.db)

    # Only one session to access to database
    _session = dbmng.get_session()

    # Open the file
    decoder = json.JSONDecoder(object_pairs_hook=collections.OrderedDict)
    input_f = open(args.input_file, 'r')
    input_j = decoder.decode(input_f.read())

    # Load json keys and make it inserted
    nowtime = datetime.datetime.now()

    for key in input_j.keys():
        if key == 'translations':
            for dic in input_j[key]:
                rec = Translation(id=dic['id'],
                                  locale=dic['locale'],
                                  t=dic['t'],
                                  created_at=nowtime,
                                  created_by='rightctl_initializer')
                _session.add(rec)
            # Apparently it's better not to use for-else when the loop has no break.
            # else:
            _session.commit()

        elif key == 'rights':
            for dic in input_j[key]:
                rec = Right(action=dic['action'],
                            enable_flag=True,
                            created_at=nowtime,
                            created_by='rightctl_initializer')
                _session.add(rec)
            # Apparently it's better not to use for-else when the loop has no break.
            # else:
            _session.commit()

        elif key == 'policies':
            for dic in input_j[key]:
                rec = Policy(id=dic['id'],
                             policy_tid=dic['policy_tid'],
                             created_at=nowtime,
                             created_by='rightctl_initializer')
                _session.add(rec)
                _session.commit()
                for right_s in dic['rights']:
                    right = _session.query(Right).filter(
                        Right.action == right_s).one()
                    child = PolicyHasRight(policy_id=rec.id, right_id=right.id)
                    _session.add(child)
                # Apparently it's better not to use for-else when the loop has no break.
                # else:
                _session.commit()
            # Apparently it's better not to use for-else when the loop has no break.
            # else:
            _session.commit()

        elif key == 'roles':
            for dic in input_j[key]:
                rec = Role(id=dic['id'],
                           role_tid=dic['role_tid'],
                           created_at=nowtime,
                           created_by='rightctl_initializer')
                _session.add(rec)
                _session.commit()
                for policy_s in dic['policies']:
                    policy = _session.query(Policy).filter(
                        Policy.id == policy_s).one()
                    child = RoleHasPolicy(role_id=rec.id, policy_id=policy.id)
                    _session.add(child)
                # Apparently it's better not to use for-else when the loop has no break.
                # else:
                _session.commit()
            # Apparently it's better not to use for-else when the loop has no break.
            # else:
            _session.commit()
Example #23
import json
from collections import Mapping
from types import GeneratorType

from mo_dots import split_field, startswith_field, relative_field, Data, join_field, Null, wrap
from mo_logs import Log

DEBUG = False

MIN_READ_SIZE = 8 * 1024
WHITESPACE = b" \n\r\t"
CLOSE = {b"{": b"}", b"[": b"]"}
NO_VARS = set()

json_decoder = json.JSONDecoder().decode


def parse(json, query_path, expected_vars=NO_VARS):
    """
    INTENDED TO TREAT JSON AS A STREAM; USING MINIMAL MEMORY WHILE IT ITERATES
    THROUGH THE STRUCTURE.  ASSUMING THE JSON IS LARGE, AND HAS A HIGH LEVEL
    ARRAY STRUCTURE, IT WILL yield EACH OBJECT IN THAT ARRAY.  NESTED ARRAYS
    ARE HANDLED BY REPEATING THE PARENT PROPERTIES FOR EACH MEMBER OF THE
    NESTED ARRAY. DEEPER NESTED PROPERTIES ARE TREATED AS PRIMITIVE VALUES;
    THE STANDARD JSON DECODER IS USED.

    LARGE MANY-PROPERTY OBJECTS CAN BE HANDLED BY `items()`

    :param json:       SOME STRING-LIKE STRUCTURE THAT CAN ASSUME WE LOOK AT
                       ONE CHARACTER AT A TIME, IN ORDER
Example #24
def process_wiki_events(wiki, wiki_schedule, workshop_schedule = None, timestamp_offset = None, options = None, fetch_wikitext=True):
    global sessions_complete, warnings, time_stamp_offset

    if timestamp_offset is not None:
        time_stamp_offset = timestamp_offset

    sessions_complete = OrderedDict()
    events_total = 0
    events_successful = 0
    events_in_halls = 0 # aka workshops
    used_guids = []
    debug = options and options.debug

    def warn(msg, force = False):
        global warnings, events_with_warnings, events_in_halls_with_warnings
        if not warnings:
            warnings = True
            events_with_warnings += 1
            if is_workshop_room_session:
                events_in_halls_with_warnings += 1
            
            if is_workshop_room_session or options.show_assembly_warnings or force:
                print('')
                print(event_wiki_name)
                try: print('  at ' + start_time.isoformat() ) 
                except NameError: pass
                try: print('  in ' + room ) 
                except NameError: pass
                print('  ' + wiki_edit_url)
            
        #if not is_workshop_room_session:
        #    msg += ' – at assembly?'
        if is_workshop_room_session or options.show_assembly_warnings or force:
            print(msg)
    
    #for event_wiki_name, event_r in wiki.events.iteritems(): #python2
    for event_wiki_name, event_r in wiki.events.items(): #python3
        
        warnings = False
        sys.stdout.write('.')
        
        try:
            wiki_page_name = event_wiki_name.split('#')[0].replace(' ', '_') # or see fullurl property
            wiki_edit_url = wiki.wiki_url + '/index.php?title=' + urllib.parse.quote_plus(wiki_page_name) + '&action=edit'
            wiki_parsetree_url = wiki.wiki_url + '/api.php?action=parse&format=json&page=' + urllib.parse.quote_plus(wiki_page_name) + '&prop=parsetree'

            session = wiki.parent_of_event(event_wiki_name)
            event = event_r['printouts']
            event_n = None
            events_total += 1
                    
            # One event can take place in multiple rooms...
            # WORKAROUND: if that is the case, just pick the first one
            room = ''
            is_workshop_room_session = False
            if len(event['Has session location']) == 1:
                room = event['Has session location'][0]['fulltext']
                
                if room.split(':', 1)[0] == 'Room':
                    is_workshop_room_session = True
                    room = Wiki.remove_prefix(room)
            
            elif len(event['Has session location']) == 0:
                warn("  has no room yet, skipping...")
                continue
            else:
                warn("  WARNING: has multiple rooms ???, just picking the first one…")
                event['Has session location'] = event['Has session location'][0]['fulltext']
            
            # http://stackoverflow.com/questions/22698244/how-to-merge-two-json-string-in-python
            # This will only work if there are unique keys in each json string.
            #combined = dict(session.items() + event.items()) #python2
            combined = session.copy() #python3 TODO: test if this really leads to the same result
            combined.update(event)
            sessions_complete[event_wiki_name] = combined
            
            if len(event['Has start time']) < 1:
                warn("  has no start time")
                day = None
            else:
                date_time = datetime.fromtimestamp(int(event['Has start time'][0]['timestamp']) + time_stamp_offset)
                start_time = tz.localize(date_time)
                day = wiki_schedule.get_day_from_time(start_time)
    
            #if is_workshop_room_session and day is not None and event['Has duration']:
            if day is not None and event['Has duration']:
                duration = 0
                if event['Has duration']:
                    duration = event['Has duration'][0] 
                    if not isinstance(duration, int) and 'value' in duration:
                        duration = duration['value']

                if duration > 60*24:
                    warn('   event takes longer than 24h, skipping...')
                    continue


                lang = ''
                if session['Held in language'] and len(session['Held in language']) > 0:
                    lang = session['Held in language'][0].split(' - ', 1)[0]

                if len(event['GUID']) > 0:
                    guid = event['GUID'][0]
                    if not isinstance(guid, str):
                        raise Exception('GUID is not string, but ' + guid)
                else:
                    guid = voc.tools.gen_uuid(session['fullurl'] + str(event['Has start time'][0]))
                    warn("   GUID was empty, generated one for now. Not sure if it's stable...")
                    #if debug:
                    #    print_json(event['GUID'])
                if guid in used_guids:
                    warn('   GUID {} was already used before, generated a random one for now. Please fix the session wiki page to ensure users can stay subscribed to event!'.format(guid), force=True)
                    guid = voc.tools.gen_uuid(session['fullurl'] + str(event['Has start time'][0]))
                used_guids.append(guid)

                local_id = voc.tools.get_id(guid)

                description = ("\n".join(session['Has description'])).strip()
                if fetch_wikitext:
                    if int(session['Modification date'][0]['timestamp']) > voc.tools.last_edited.get(guid, 0):
                        wiki_text = ''
                        # Retry up to three times
                        for _ in range(3):
                            content_r = requests.get(wiki_parsetree_url, timeout=5)
                            if content_r.ok is True:
                                print("Page {0} requested successfully!".format(wiki_parsetree_url))
                                break
                            print(".")
                        if content_r.ok is False:
                            print("   Requesting {1} failed, HTTP {0}.".format(content_r.status_code, wiki_parsetree_url))
                        else:
                            try:
                                wiki_text_tree = ET.fromstring(voc.tools.parse_json(content_r.text)['parse']['parsetree'].replace('\n',''))
                            except AttributeError:
                                wiki_text_tree = ET.fromstring(voc.tools.parse_json(content_r.text)['parse']['parsetree']['*'].replace('\n',''))
                            except KeyError:
                                print(wiki_parsetree_url)
                            for element in wiki_text_tree.iterfind('template'):
                                if element.tail is not None:
                                    wiki_text += html.unescape(element.tail)
                        description = ("\n".join(session['Has description'])).strip() + '\n' + wiki_text
                    else: # unmodified
                        if os.path.isfile("events/{guid}.json".format(guid=guid)):
                            with open("events/{guid}.json".format(guid=guid), "r") as fp:
                                # maintain order from file
                                temp = fp.read()
                                old_event = json.JSONDecoder().decode(temp)
                            description = old_event['description']


                voc.tools.last_edited[guid] = int(session['Modification date'][0]['timestamp'])


                event_n = Event([
                    ('id', local_id),
                    ('guid', guid),
                    ('url', session['fullurl']), # TODO: add ensure_url() which adds the "https:" prefix when necessary
                    ('logo', None),
                    ('date', start_time.isoformat()),
                    ('start', start_time.strftime('%H:%M')),
                    #('duration', str(timedelta(minutes=event['Has duration'][0])) ),
                    ('duration', '%d:%02d' % divmod(duration, 60) ),
                    ('room', room),
                    ('slug', '{slug}-{id}-{name}'.format(
                        slug=wiki_schedule.conference()['acronym'].lower(),
                        id=local_id,
                        name=voc.tools.normalise_string(session['wiki_name'].lower())
                    )),
                    ('title', session['Has title'][0]),
                    ('subtitle', "\n".join(event['Has subtitle']) ),
                    ('track', 'self organized sessions'),
                    ('type', " ".join(session['Has session type']).lower()),
                    ('language', lang ),
                    ('abstract', ''),
                    ('description', description),
                    ('persons', [ OrderedDict([
                        ('id', 0),
                        ('url', p['fullurl']), # sometimes a https: is needed...
                        ('public_name', Wiki.remove_prefix(p['fulltext'])), # must be last element so that transformation to xml works
                    ]) for p in session['Is organized by'] ]),
                    ('links', [ OrderedDict([
                        ('url', url), # TODO sometimes a https:// is needed...
                        ('title', url),
                    ]) for url in session['Has website'] ])
                ], start_time)
    
                # Break if conference day date and event date do not match
                conference_day_start = wiki_schedule.day(day).start
                conference_day_end = wiki_schedule.day(day).end
                if not conference_day_start <= event_n.start < conference_day_end:
                    raise Exception("Current conference day from {0} to {1} does not match current event {2} with date {3}."
                        .format(conference_day_start, conference_day_end, event_n["id"], event_n.start))
                
                # Events from day 0 (26. December) do not go into the full schedule
                if start_time.day != 26 and not only_workshops:      
                    wiki_schedule.add_event(event_n)
                
                if workshop_schedule and is_workshop_room_session:
                    events_in_halls +=1
                    workshop_schedule.add_event(event_n)
                
                events_successful += 1
        except Warning as w:
            warn(w)
        except:
            if 'event_n' in locals(): print(event_n)
            if 'event' in locals(): print(json.dumps(event, indent=2))
            print("  unexpected error: " + str(sys.exc_info()[0]))
            traceback.print_exc()
            if options.exit_when_exception_occours: 
                exit()
            
    store_sos_ids()
    store_last_edited()

    if debug:
        with open("sessions_complete.json", "w") as fp:
            json.dump(sessions_complete, fp, indent=2)

    print("\nFrom %d total events (%d in halls), %d were successful, while %d (%d in halls) produced warnings" % (events_total, events_in_halls, events_successful, events_with_warnings, events_in_halls_with_warnings))
    if not options.show_assembly_warnings:
        print(" (use --show-assembly-warnings cli option to show all warnings)") 
Example #25
    def find_book(self, bookid=None):
        resultlist = []

        URL = 'https://www.googleapis.com/books/v1/volumes/' + bookid
        jsonresults = json.JSONDecoder().decode(
            urllib2.urlopen(URL, timeout=30).read())
        # The volumes endpoint returns a single volume object.
        item = jsonresults

        try:
            bookdate = item['volumeInfo']['publishedDate']
        except KeyError:
            bookdate = 'Unknown'

        try:
            bookimg = item['volumeInfo']['imageLinks']['thumbnail']
        except KeyError:
            bookimg = 'images/nocover.png'

        try:
            bookrate = item['volumeInfo']['averageRating']
        except KeyError:
            bookrate = 0

        try:
            bookpages = item['volumeInfo']['pageCount']
        except KeyError:
            bookpages = 0

        try:
            bookgenre = item['volumeInfo']['categories']
        except KeyError:
            bookgenre = 'Unknown'

        try:
            bookdesc = item['volumeInfo']['description']
        except KeyError:
            bookdesc = 'Not available'

        try:
            if item['volumeInfo']['industryIdentifiers'][0][
                    'type'] == 'ISBN_10':
                bookisbn = item['volumeInfo']['industryIdentifiers'][0][
                    'identifier']
            else:
                bookisbn = 0
        except KeyError:
            bookisbn = 0

        resultlist.append({
            'bookname': item['volumeInfo']['title'],
            'bookisbn': bookisbn,
            'bookdate': bookdate,
            'booklang': item['volumeInfo']['language'],
            'booklink': item['volumeInfo']['canonicalVolumeLink'],
            'bookrate': float(bookrate),
            'bookimg': bookimg,
            'bookpages': bookpages,
            'bookgenre': bookgenre,
            'bookdesc': bookdesc
        })

        return resultlist
Example #26
def load_last_edited():
    if os.path.isfile("_last_edited.json"):
        with open("_last_edited.json", "r") as fp:
            # maintain order from file
            temp = fp.read()
            voc.tools.last_edited = json.JSONDecoder(object_pairs_hook=OrderedDict).decode(temp)
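For reference: object_pairs_hook receives every JSON object as a list of (key, value) pairs, which is how this and the earlier examples preserve key order from the file. A minimal demonstration:

import json
from collections import OrderedDict

decoder = json.JSONDecoder(object_pairs_hook=OrderedDict)
parsed = decoder.decode('{"b": 1, "a": 2}')
# Keys come back in document order.
assert list(parsed.keys()) == ['b', 'a']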
Example #27
def yq(input_streams=None, output_stream=None, input_format="yaml", output_format="json",
       program_name="yq", width=None, indentless_lists=False, xml_root=None, xml_dtd=False,
       explicit_start=False, explicit_end=False, jq_args=frozenset(), exit_func=None):
    if not input_streams:
        input_streams = [sys.stdin]
    if not output_stream:
        output_stream = sys.stdout
    if not exit_func:
        exit_func = sys.exit
    converting_output = output_format != "json"

    try:
        # Note: universal_newlines is just a way to induce subprocess to make stdin a text buffer and encode it for us
        jq = subprocess.Popen(["jq"] + list(jq_args),
                              stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE if converting_output else None,
                              universal_newlines=True)
    except OSError as e:
        msg = "{}: Error starting jq: {}: {}. Is jq installed and available on PATH?"
        exit_func(msg.format(program_name, type(e).__name__, e))

    try:
        if converting_output:
            # TODO: enable true streaming in this branch (with asyncio, asyncproc, a multi-shot variant of
            # subprocess.Popen._communicate, etc.)
            # See https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
            use_annotations = output_format == "annotated_yaml"
            input_docs = []
            for input_stream in input_streams:
                if input_format == "yaml":
                    loader = get_loader(use_annotations=use_annotations)
                    input_docs.extend(yaml.load_all(input_stream, Loader=loader))
                elif input_format == "xml":
                    import xmltodict
                    input_docs.append(xmltodict.parse(input_stream.read(), disable_entities=True))
                elif input_format == "toml":
                    import toml
                    input_docs.append(toml.load(input_stream))
                else:
                    raise Exception("Unknown input format")
            input_payload = "\n".join(json.dumps(doc, cls=JSONDateTimeEncoder) for doc in input_docs)
            jq_out, jq_err = jq.communicate(input_payload)
            json_decoder = json.JSONDecoder(object_pairs_hook=OrderedDict)
            if output_format == "yaml" or output_format == "annotated_yaml":
                yaml.dump_all(decode_docs(jq_out, json_decoder), stream=output_stream,
                              Dumper=get_dumper(use_annotations=use_annotations, indentless=indentless_lists),
                              width=width, allow_unicode=True, default_flow_style=False,
                              explicit_start=explicit_start, explicit_end=explicit_end)
            elif output_format == "xml":
                import xmltodict
                for doc in decode_docs(jq_out, json_decoder):
                    if xml_root:
                        doc = {xml_root: doc}
                    elif not isinstance(doc, OrderedDict):
                        msg = ("{}: Error converting JSON to XML: cannot represent non-object types at top level. "
                               "Use --xml-root=name to envelope your output with a root element.")
                        exit_func(msg.format(program_name))
                    full_document = bool(xml_dtd)
                    try:
                        xmltodict.unparse(doc, output=output_stream, full_document=full_document, pretty=True,
                                          indent="  ")
                    except ValueError as e:
                        if "Document must have exactly one root" in str(e):
                            raise Exception(str(e) + " Use --xml-root=name to envelope your output with a root element")
                        else:
                            raise
                    output_stream.write(b"\n" if sys.version_info < (3, 0) else "\n")
            elif output_format == "toml":
                import toml
                for doc in decode_docs(jq_out, json_decoder):
                    if not isinstance(doc, OrderedDict):
                        msg = "{}: Error converting JSON to TOML: cannot represent non-object types at top level."
                        exit_func(msg.format(program_name))

                    if USING_PYTHON2:
                        # For Python 2, dump the string and encode it into bytes.
                        output = toml.dumps(doc)
                        output_stream.write(output.encode("utf-8"))
                    else:
                        # For Python 3, write the unicode to the buffer directly.
                        toml.dump(doc, output_stream)
        else:
            if input_format == "yaml":
                loader = get_loader(use_annotations=False)
                for input_stream in input_streams:
                    for doc in yaml.load_all(input_stream, Loader=loader):
                        json.dump(doc, jq.stdin, cls=JSONDateTimeEncoder)
                        jq.stdin.write("\n")
            elif input_format == "xml":
                import xmltodict
                for input_stream in input_streams:
                    json.dump(xmltodict.parse(input_stream.read(), disable_entities=True), jq.stdin)
                    jq.stdin.write("\n")
            elif input_format == "toml":
                import toml
                for input_stream in input_streams:
                    json.dump(toml.load(input_stream), jq.stdin)
                    jq.stdin.write("\n")
            else:
                raise Exception("Unknown input format")

            jq.stdin.close()
            jq.wait()
        for input_stream in input_streams:
            input_stream.close()
        exit_func(jq.returncode)
    except Exception as e:
        exit_func("{}: Error running jq: {}: {}.".format(program_name, type(e).__name__, e))
Example #28
def test_cli(node_factory):
    l1 = node_factory.get_node()

    out = subprocess.check_output([
        'cli/lightning-cli',
        '--lightning-dir={}'.format(l1.daemon.lightning_dir), 'help'
    ]).decode('utf-8')
    # Test some known output.
    assert 'help [command]\n    List available commands, or give verbose help on one {command}' in out

    # Test JSON output.
    out = subprocess.check_output([
        'cli/lightning-cli',
        '--lightning-dir={}'.format(l1.daemon.lightning_dir), '-J', 'help'
    ]).decode('utf-8')
    j, _ = json.JSONDecoder().raw_decode(out)
    assert j['help'][0]['command'] is not None
    assert j['help'][0]['description'] is not None

    # Test keyword input (autodetect)
    out = subprocess.check_output([
        'cli/lightning-cli',
        '--lightning-dir={}'.format(l1.daemon.lightning_dir), '-J', 'help',
        'command=help'
    ]).decode('utf-8')
    j, _ = json.JSONDecoder().raw_decode(out)
    assert 'help [command]' in j['help'][0]['verbose']

    # Test keyword input (forced)
    out = subprocess.check_output([
        'cli/lightning-cli',
        '--lightning-dir={}'.format(l1.daemon.lightning_dir), '-J', '-k',
        'help', 'command=help'
    ]).decode('utf-8')
    j, _ = json.JSONDecoder().raw_decode(out)
    assert 'help [command]' in j['help'][0]['verbose']

    # Test ordered input (autodetect)
    out = subprocess.check_output([
        'cli/lightning-cli',
        '--lightning-dir={}'.format(l1.daemon.lightning_dir), '-J', 'help',
        'help'
    ]).decode('utf-8')
    j, _ = json.JSONDecoder().raw_decode(out)
    assert 'help [command]' in j['help'][0]['verbose']

    # Test ordered input (forced)
    out = subprocess.check_output([
        'cli/lightning-cli',
        '--lightning-dir={}'.format(l1.daemon.lightning_dir), '-J', '-o',
        'help', 'help'
    ]).decode('utf-8')
    j, _ = json.JSONDecoder().raw_decode(out)
    assert 'help [command]' in j['help'][0]['verbose']

    # Test missing parameters.
    try:
        # This will error due to missing parameters.
        # We want to check if lightningd will crash.
        out = subprocess.check_output([
            'cli/lightning-cli',
            '--lightning-dir={}'.format(l1.daemon.lightning_dir), '-J', '-o',
            'sendpay'
        ]).decode('utf-8')
    except Exception:
        pass

    # Test it escapes JSON completely in both method and params.
    # cli turns " into \", reply turns that into \\\".
    out = subprocess.run([
        'cli/lightning-cli', '--lightning-dir={}'.format(
            l1.daemon.lightning_dir), 'x"[]{}'
    ],
                         stdout=subprocess.PIPE)
    assert 'Unknown command \'x\\\\\\"[]{}\'' in out.stdout.decode('utf-8')

    subprocess.check_output([
        'cli/lightning-cli',
        '--lightning-dir={}'.format(l1.daemon.lightning_dir), 'invoice',
        '123000', 'l"[]{}', 'd"[]{}'
    ]).decode('utf-8')
    # Check label is correct, and also that cli's keyword parsing works.
    out = subprocess.check_output([
        'cli/lightning-cli',
        '--lightning-dir={}'.format(l1.daemon.lightning_dir), '-k',
        'listinvoices', 'label=l"[]{}'
    ]).decode('utf-8')
    j = json.loads(out)
    assert only_one(j['invoices'])['label'] == 'l"[]{}'
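This test reaches for raw_decode rather than json.loads, presumably because CLI output can continue past the JSON document; raw_decode stops after the first complete JSON value instead of raising on trailing text. A standalone check:

import json

obj, end = json.JSONDecoder().raw_decode('{"help": []}\nextra output')
assert obj == {"help": []}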
Example #29
def getPics(html):
    match = re.search('var J_photo = (.*);', html)
    if match is None:
        # No photo data found; avoid NameError on obj_string below.
        return None
    obj_string = match.group(1)
    obj = json.JSONDecoder().decode(obj_string)
    return obj
Example #30
 def __init__(self, **kwargs):
     self.decoder = json.JSONDecoder(**kwargs)
     if kwargs:
         self.module = 'default'
     else:
         self.module = MODULE