Example #1
    def formdata(self, handler, instance):
        request = handler.request

        id = int(request.REQUEST.get('id', -1))
        if request.method == "POST":
            formdata = load_json(request.POST.get('data', '{}'))
            extra = load_json(request.POST.get('extra', '{}'))
            formname = extra.get('name', 'new form')
            spokes = ",".join(extra.get('spokes', []))

            if id == -1:
                pf = PropertyForm(conf=instance, name=formname,
                                  form=dump_json(formdata),
                                  types=spokes)
                pf.save()
                id = pf.id
            else:
                pf = PropertyForm.objects.get(pk=id)
                pf.name = formname
                pf.form = dump_json(formdata)
                pf.types = spokes
                pf.save()

        if id != -1:
            pf = PropertyForm.objects.get(pk=id)
            return dict(form=load_json(pf.form),
                        extra=dict(name=pf.name, id=pf.id,
                                   spokes=pf.types.split(",")))
        return dict(form=[], extra={'name':'', 'spokes':[]})
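Note: the examples on this page call dump_json and load_json without showing their imports. As a working assumption (not shown in any of the original sources), they behave like thin aliases of the standard library json module; in the snippets that pass a file object, dump_json plays the role of json.dump instead:

# Presumed aliasing for the string-based snippets (an assumption):
from json import dumps as dump_json, loads as load_json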
Example #2
	def backup(self, full = False):
		transfer = self.transfer(newshare = self)
		#print(repr(self.newbackup.predecessor))
		#print(repr(self.reference))
		hostconfig = self.host.config

		info = dict(
			failed = False,
			name = self.name,
			path = self.path,
			mountpoint = self.mountpoint,
		)

		with hostconfig.setenv(self.env):
			self.pre_command(fruitbak = self.fruitbak, host = self.host, backup = self.newbackup, newshare = self)

			info['startTime'] = time_ns()

			with self.hardhat_maker:
				transfer.transfer()

			info['endTime'] = time_ns()

			self.post_command(fruitbak = self.fruitbak, host = self.host, backup = self.newbackup, newshare = self)

		with open('info.json', 'w', opener = self.sharedir_fd.opener) as fp:
			dump_json(info, fp)

		return info
Example #3
def do_context_diff(original, new, dump_as_json=False):
    if dump_as_json:
        return ''.join(context_diff(dump_json(original, indent=2).splitlines(True),
                                    dump_json(new, indent=2).splitlines(True),
                                    'Original', 'New'))
    return ''.join(context_diff(original.splitlines(True),
                                new.splitlines(True),
                                'Original', 'New'))
Example #4
def write(pl, fn):
    "Write a playlist dictionary to file."
    assert valid_playlist(pl)
    if not type(fn) == file:
        plf = codecs.open(fn, 'wb', encoding="utf-8")
    else:
        writer = codecs.getwriter("utf8")
        plf = writer(fn)
    try:
        dump_json(pl, plf)
    finally:
        plf.close()
Example #5
	def backup(self):
		backupdir = self.backupdir
		backupdir_fd = self.backupdir_fd

		flock(backupdir_fd, LOCK_EX|LOCK_NB)

		def onerror(exc):
			raise exc

		for root, dirs, files, root_fd in fwalk(dir_fd = backupdir_fd, topdown = False, onerror = onerror):
			for name in files:
				unlink(name, dir_fd = root_fd)
			for name in dirs:
				rmdir(name, dir_fd = root_fd)

		env = self.env
		config = self.config
		shares_info = {}
		info = dict(level = self.level, failed = False, shares = shares_info)

		with config.setenv(env):
			self.pre_command(fruitbak = self.fruitbak, host = self.host, newbackup = self)

			info['startTime'] = time_ns()

			for share_config in self.shares:
				combined_config = config.copy()
				combined_config.update(share_config)
				share = NewShare(config = combined_config, newbackup = self)
				shares_info[share.name] = share.backup()

			self.agent.sync()

			info['endTime'] = time_ns()

			self.post_command(fruitbak = self.fruitbak, host = self.host, newbackup = self)

		with open('info.json', 'w', opener = backupdir_fd.opener) as fp:
			dump_json(info, fp)

		hostdir_fd = self.host.hostdir_fd

		self.hashes_fp.close()
		Hashset.sortfile('hashes', self.fruitbak.hash_size, dir_fd = backupdir_fd)

		rename('new', str(self.index), src_dir_fd = hostdir_fd, dst_dir_fd = hostdir_fd)

		return info
Example #6
def verify_node(node):
    node_string = dump_json(node)
    variables = None
    secrets = None
    if upload_variables:
        variables_in_conf = regex_findall(r'\$ENV\((\S*?)\)',
                                          node_string)  # Find env vars
        variables: dict = load_json(
            open(git_cloned_dir + sync_root + 'node/' + var_file_path).read())
        for var in variables_in_conf:  # Verify they exist in git repo
            if var not in variables:
                logging.error(
                    f'Missing env var {var} in variables file {var_file_path}')
    if upload_secrets:
        secrets_in_conf = regex_findall(r'\$SECRET\((\S*?)\)',
                                        node_string)  # Find secrets
        vault = Vaulter(vault_url, vault_git_token,
                        vault_mounting_point)  # Create keyvault object
        secrets: dict = vault.get_secrets(
            secrets_in_conf)  # Get the secrets from keyvault
        if vault.verify() is False:  # Verify all secrets exist.
            logging.error(
                f'These secrets do not exist in the vault {vault.get_missing_secrets()}'
            )

    return variables, secrets
Example #7
 def to_geos(json_dict_polygon):
     """Return GEOSGeometry polygon from polygon dictionary"""
     json_dict_polygon = json_dict_polygon['geometry']
     geo = GEOSGeometry(dump_json(json_dict_polygon))
     geo.set_srid(original_project)
     geo.transform(desired_projection)
     return geo
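A hypothetical call, assuming original_project and desired_projection are module-level SRID integers (say 4326 and 3857) and django.contrib.gis is available:

# Hypothetical GeoJSON-style input; only the 'geometry' member is used.
feature = {
    'type': 'Feature',
    'geometry': {
        'type': 'Polygon',
        'coordinates': [[[0, 0], [0, 1], [1, 1], [0, 0]]],
    },
}
polygon = to_geos(feature)  # GEOSGeometry transformed to desired_projection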
Example #8
def check_and_replace_orchestrator_systems():
    for old_filename in os.listdir(sesam_checkout_dir + "/unpacked/systems/"):
        with open(
                os.path.join(sesam_checkout_dir + "/unpacked/systems/",
                             old_filename), 'r') as f:  # open in readonly mode
            old_file = load_json(f.read())
            try:
                old_file["metadata"]["orchestrator"]["original_configuration"]
                for new_filename in os.listdir(git_cloned_dir +
                                               "/sesam-node/systems/"):
                    with open(
                            os.path.join(
                                git_cloned_dir + "/sesam-node/systems/",
                                new_filename),
                            'r') as g:  # open in readonly mode
                        new_file = load_json(g.read())
                        if old_file["metadata"]["orchestrator"][
                                "original_configuration"] == new_file:
                            logging.info(
                                "The system %s is restored to orchestrator mode"
                                % new_file["_id"])
                            with open(
                                    os.path.join(payload_dir + "/systems/",
                                                 new_filename), 'w') as h:
                                h.write(dump_json(old_file))
            except KeyError:
                pass  # not an orchestrator-managed system; skip it
Example #9
def json_stringify(data):
    """
    :type data: object
    :rtype: str
    """
    return dump_json(data, sort_keys=True,
                     separators=(',', ':')).encode("utf-8") if data else None
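For reference, here is what this compact, key-sorted encoding produces, assuming dump_json is json.dumps; note that the trailing "if data" guard also maps falsy inputs such as {} or 0 to None:

json_stringify({'b': 1, 'a': 2})  # -> b'{"a":2,"b":1}' (stable byte form)
json_stringify({})                # -> None: empty dict is falsy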
Example #10
    def generate_config(self):
        Sample = [{
            "sass": {
                "source": "sass/style.scss",
                "output": "style.min.css",
                "output_style": "compressed",
                "source_comments": False
            }
        }, {
            "js": {
                "include": ["js/fo.js", "js/bar.js"],
                "output": "script.min.js"
            }
        }]

        # Save
        String = dump_json(Sample, indent="\t")
        Filepath = path.join(self.BasePath, "eleran.json")

        # Print
        echo_click(" * Creating config file to", Filepath)

        # Save
        with open(Filepath, "w") as FileWrite:
            FileWrite.write(String)
Example #11
 async def publish(self, topic, payload, retain=False):
     payload = (
         dump_json(payload) if isinstance(payload, dict) else str(payload)
     )
     _LOGGER.debug(f"Publishing on {topic}: {payload}")
     await self.mqtt.publish(topic, payload.encode("utf-8"), retain=retain)
     _LOGGER.debug(f"Published on {topic}: {payload}")
Example #12
    def query_matchingrule(self, mr_name, json=False):
        """Returns a single matching rule instance that matches the mr_name.
        Returns None if the matching rule doesn't exist.

        @param mr_name - The name of the matching rule you want to query.

        return MatchingRule or None

        <ldap.schema.models.MatchingRule instance>
        """
        matchingRules = self.get_matchingrules()
        matchingRule = [
            mr for mr in matchingRules
            if mr_name.lower() in list(map(str.lower, mr.names))
        ]
        if len(matchingRule) != 1:
            # This is an error.
            if json:
                raise ValueError('Could not find matchingrule: ' +
                                 mr_name)
            else:
                return None
        matchingRule = matchingRule[0]
        if json:
            result = {'type': 'schema', 'mr': vars(matchingRule)}
            return dump_json(result)
        else:
            return matchingRule
Example #13
def list_all(inst, basedn, log, args):
    log = log.getChild('list_all')
    schema = Schema(inst)
    json = False
    if args is not None and args.json:
        json = True

    objectclass_elems = schema.get_objectclasses(json=json)
    attributetype_elems = schema.get_attributetypes(json=json)
    matchingrule_elems = schema.get_matchingrules(json=json)

    if json:
        print(
            dump_json(
                {
                    'type': 'schema',
                    'objectclasses': objectclass_elems,
                    'attributetypes': attributetype_elems,
                    'matchingrules': matchingrule_elems
                },
                indent=4))
    else:
        separator_line = "".join(["-" for _ in range(50)])
        print("Objectclasses:\n", separator_line)
        for oc in objectclass_elems:
            print(oc)
        print("AttributeTypes:\n", separator_line)
        for at in attributetype_elems:
            print(at)
        print("MathingRules:\n", separator_line)
        for mr in matchingrule_elems:
            print(mr)
Example #14
    def query_objectclass(self, objectclassname, json=False):
        """Returns a single ObjectClass instance that matches objectclassname.
        Returns None if the objectClass doesn't exist.

        @param objectclassname - The name of the objectClass you want to query.

        return ObjectClass or None

        ex. query_objectclass('account')
        <ldap.schema.models.ObjectClass instance>
        """
        objectclasses = self.get_objectclasses()

        objectclass = [
            oc for oc in objectclasses
            if objectclassname.lower() in list(map(str.lower, oc.names))
        ]
        if len(objectclass) != 1:
            # This is an error.
            if json:
                raise ValueError('Could not find objectclass: ' +
                                 objectclassname)
            else:
                return None
        objectclass = objectclass[0]
        if json:
            result = {'type': 'schema', 'oc': vars(objectclass)}
            return dump_json(result)
        else:
            return objectclass
Example #15
def properties_data_handler(handler, request, action):
    """ return relevant form data """
    ## return first form that matches. TODO: combine forms into groups

    ## combine with data

    # import pdb; pdb.set_trace()
    spoke = handler.spoke()
    if request.method == "POST":
        data = load_json(request.POST.get('data'))
        for (id, formdata) in data.iteritems():
            form_obj_storage, _ = PropertyFormData.objects.get_or_create(form_id=id, content=spoke.instance)
            form_obj_storage.properties = dump_json(formdata)
            form_obj_storage.save()

    spokename = handler.spoke().name()
    forms = []
    for pf in PropertyForm.objects.all():
        types = pf.types.split(",")
        if spokename in types:
            forms.append(dict(id=pf.id, name=pf.name, form=load_json(pf.form)))

    data = {}
    for pfd in spoke.instance.properties.all():
        data[pfd.form_id] = load_json(pfd.properties)
    return dict(forms=forms, data=data)
Example #16
 def state(self):
     state = self.instrument.state
     if self.is_lock:
         return (STATE_UNLOCK, STATE_LOCK)[state]
     elif self.is_switch:
         return (STATE_OFF, STATE_ON)[state]
     elif self.is_opening:
         return (STATE_CLOSE, STATE_OPEN)[state]
     elif self.is_safety:
         return (STATE_SAFE, STATE_UNSAFE)[state]
     elif self.is_binary_sensor:
         return (STATE_OFF, STATE_ON)[state]
     elif self.is_position:
         lat, lon = state
         key = self.config.get(CONF_OWNTRACKS_KEY)
         res = dict(_type='location',
                    tid='volvo',
                    t='p',
                    lat=lat,
                    lon=lon,
                    acc=1,
                    tst=int(time()))
         return (dict(_type='encrypted',
                      data=owntracks_encrypt(dump_json(res), key))
                 if key else res)
     else:
         return state
Example #17
    def query_attributetype(self, attributetypename, json=False):
        """Returns a tuple of the AttributeType, and what objectclasses may or
        must take this attributeType. Returns None if attributetype doesn't
        exist.

        @param attributetypename - The name of the attributeType you want to
        query

        return (AttributeType, Must, May) or None

        ex. query_attributetype('uid')
        ( <ldap.schema.models.AttributeType instance>,
         [<ldap.schema.models.ObjectClass instance>, ...],
         [<ldap.schema.models.ObjectClass instance>, ...] )
        """
        # First, get the attribute that matches name. We need to consider
        # alternate names. There is no way to search this, so we have to
        # filter our set of all attribute types.
        objectclasses = self.get_objectclasses()
        attributetypes = self.get_attributetypes()
        attributetypename = attributetypename.lower()

        attributetype = [at for at in attributetypes
                         if attributetypename.lower() in
                         list(map(str.lower, at.names))]
        if len(attributetype) != 1:
            # This is an error.
            if json:
                raise ValueError('Could not find attribute: ' + attributetypename)
            else:
                return None
        attributetype = attributetype[0]
        # Get the primary name of this attribute
        attributetypename = attributetype.names[0]
        # Build a set if they have may.
        may = [oc for oc in objectclasses if attributetypename.lower() in
               list(map(str.lower, oc.may))]
        # Build a set if they have must.
        must = [oc for oc in objectclasses if attributetypename.lower() in
                list(map(str.lower, oc.must))]

        if json:
            # convert Objectclass class to dict, then sort each list
            may = [vars(oc) for oc in may]
            must = [vars(oc) for oc in must]
            # Add normalized 'name' for sorting
            for oc in may:
                oc['name'] = oc['names'][0].lower()
            for oc in must:
                oc['name'] = oc['names'][0].lower()
            may = sorted(may, key=itemgetter('name'))
            must = sorted(must, key=itemgetter('name'))
            result = {'type': 'schema',
                      'at': vars(attributetype),
                      'may': may,
                      'must': must}
            return dump_json(result)
        else:
            return (attributetype, must, may)
Example #18
def data(request):
    redis = get_redis_connection('cirrus')

    data_id = request.GET.get('uuid', '')
    df = _get_data(data_id)

    key = KEY_DIGEST_PREFIX + data_id
    header = load_json(redis.hget(key, 'header'))

    new_limits = request.GET.get('new_limits', None)
    if new_limits:
        name, limits_lower, limits_upper = new_limits.split('|')
    else:
        name = None
    for row in header:
        if row['name'] == name:
            row['limits'] = [limits_lower, limits_upper]
            redis.hset(key, 'header', dump_json(header))
            
        if row['limits'][0] != '':
            if row['type'].startswith('float'):
                lim = float(row['limits'][0])
            elif row['type'].startswith('int'):
                lim = int(row['limits'][0])
            df = df[df[row['name']] >= lim]
        elif row['limits'][1] != '':
            if row['type'].startswith('float'):
                lim = float(row['limits'][1])
            elif row['type'].startswith('int'):
                lim = int(row['limits'][1])
            df = df[df[row['name']] <= lim]
    
    print(df)
    orders = request.GET.get('order', '')
    orders = [row for row in orders.split(',') if row]
    if orders:
        sort_func = request.GET.get('sort', 'quicksort')
        ascending = [
            row[0] != '-'
            for row in orders
        ]
        by = [
            row[1:] if row[0] == '-' else row
            for row in orders
        ]
        df.sort_values(by=by, ascending=ascending, inplace=True, kind=sort_func)
    
    columns = {}
    for row in df.columns:
        if str(df[row].dtype).startswith('float'):
            columns[row] = [(i if isfinite(i) else None) for i in df[row]]
        else:
            columns[row] = [i for i in df[row]]
    
    return {
        'index': [row for row in df.index],
        'data': columns,
        'order': orders
    }
Example #19
def get_cli():
    print("Fetching")
    sleep(0.5)
    data = m.get_new_lines()
    res = make_response(dump_json({"message": data}), 200)
    print("fetched", data)

    return res
Example #20
 def publish(self, topic, payload, retain=False):
     payload = dump_json(payload) if isinstance(payload, dict) else payload
     _LOGGER.debug(f'Publishing on {topic}: {payload}')
     res, mid = self.client.publish(topic, payload, retain=retain)
     if res == MQTT_ERR_SUCCESS:
         Entity.pending[mid] = (topic, payload)
     else:
         _LOGGER.warning('Failure to publish on %s', topic)
Example #21
def list_attributetypes(inst, basedn, log, args):
    log = log.getChild('list_attributetypes')
    schema = Schema(inst)
    if args is not None and args.json:
        print(dump_json(schema.get_attributetypes(json=True), indent=4))
    else:
        for attributetype in schema.get_attributetypes():
            print(attributetype)
Example #22
def write_report(world):
    path = 'output/report.json'
    # Create the output directory if needed (a bare mkdir fails when
    # 'output' already exists but the report file does not).
    os.makedirs('output', exist_ok=True)
    with open(path, 'w') as f:
        f.write(dump_json(world.env.data))
Example #23
def list_objectclasses(inst, basedn, log, args):
    log = log.getChild('list_objectclasses')
    schema = Schema(inst)
    if args is not None and args.json:
        print(dump_json(schema.get_objectclasses(json=True), indent=4))
    else:
        for oc in schema.get_objectclasses():
            print(oc)
Example #24
def json_stringify(data):
    """
    :type data: object
    :rtype: str
    """
    # TODO remove this unused function
    return dump_json(data, sort_keys=True,
                     separators=(',', ':')).encode("utf-8") if data else None
Example #25
 async def publish(self, topic, payload, retain=False):
     payload = (dump_json(payload)
                if isinstance(payload, dict) else str(payload))
     _LOGGER.debug("Publishing on %s: %s", topic, payload)
     await self.client.publish(topic,
                               payload.encode("utf-8"),
                               retain=retain)
     _LOGGER.debug("Published on %s: %s", topic, payload)
Example #26
def list_matchingrules(inst, basedn, log, args):
    log = log.getChild('list_matchingrules')
    schema = Schema(inst)
    if args is not None and args.json:
        print(dump_json(schema.get_matchingrules(json=True), indent=4))
    else:
        for mr in schema.get_matchingrules():
            print(mr)
Example #27
 def publish(self, mqtt, topic, payload, retain=False):
     payload = dump_json(payload) if isinstance(payload, dict) else payload
     _LOGGER.debug(f'Publishing on {topic}: {payload}')
     res, mid = mqtt.publish(topic, payload, retain=retain)
     if res == paho.MQTT_ERR_SUCCESS:
         Entity.subscriptions[mid] = (topic, payload)
     else:
         _LOGGER.warning('Failure to publish on %s', topic)
Example #28
def write_report(world):
    report_directory = 'report/'
    with open(report_directory + 'report.json', 'w') as f:
        f.write(dump_json(world.env.data))
    with open(report_directory + 'propagation-time.csv', 'w') as f:
        f.write('connection, count, sum, average\n')
        for connection in world.env.data['block_propagation']:
            propagation_values = world.env.data['block_propagation'][connection]
            if len(propagation_values) > 0:
                sum = 0
                for i in propagation_values:
                    sum += propagation_values[i]
                avg = sum / len(propagation_values)
                f.write(connection + ', ' + str(len(propagation_values)) + ', ' + str(sum) + ', ' + str(avg) + '\n')
        # f.write(dump_json(world.env.data['block_propagation']))
    # TODO: remove putting data in the memory
    with open(report_directory + 'verification-time.csv', 'w') as f:
        f.write(dump_json(world.env.data['block_verification']))
    db = DBConnection()
    verification = db.getAllBlock_verification()
    with open(report_directory + 'verification-time-db.csv', 'w') as f:
        for ver in verification:
            f.write(str(ver))


    vf_node = {}
    with open(report_directory + 'block-verification-time.csv', 'w') as f:
        # f.write(str(world.report_verification_time()))
        writer = csv.writer(f)
        for block_vf in world.report_verification_time():
            split = block_vf.split(':')
            writer.writerow([split[1]])
            if split[0] in vf_node:
                value = vf_node[split[0]]
                split_value = value.split(',')
                split_value[0] = str(int(split_value[0]) + 1)
                split_value[1] = str(float(split[1]) + float(split_value[1]))
                split_value[2] = str(float(split_value[1]) / float(split_value[0]))
                vf_node[split[0]] = split_value[0] + ', ' + split_value[1] + ', ' + split_value[2]
            else:
                vf_node[split[0]] = '1, ' + split[1] + ', ' + split[1]
    with open(report_directory + 'node-verification-time-average.csv', 'w') as f:
        f.write('node, count, sum, average\n')
        for block_vf in vf_node:
            f.write(block_vf + ', ' + vf_node[block_vf] + '\n')
Example #29
def get_syntaxes(inst, basedn, log, args):
    log = log.getChild('get_syntaxes')
    schema = Schema(inst)
    result = schema.get_attr_syntaxes(json=args.json)
    if args.json:
        print(dump_json(result, indent=4))
    else:
        for id, name in result.items():
            print("%s (%s)", name, id)
Example #30
async def hello(uri):
    print(uri)
    async with websockets.connect(uri) as websocket:

        # Subscribe KBar
        data = {
            'action': 'subscribe',
            'subscribe': 'kbar',
            'kbar': '5min',
            'pair': 'eth_btc'
        }
        data = dump_json(data)
        await websocket.send(data)

        sleep(0.1)
        # Subscribe Trade
        data = {'action': 'subscribe', 'subscribe': 'trade', 'pair': 'eth_btc'}
        data = dump_json(data)
        await websocket.send(data)

        sleep(0.1)
        # Subscribe Depth
        data = {
            'action': 'subscribe',
            'subscribe': 'depth',
            'depth': 10,
            'pair': 'eth_btc'
        }
        data = dump_json(data)
        await websocket.send(data)

        sleep(0.1)
        # Subscribe Tick
        data = {'action': 'subscribe', 'subscribe': 'tick', 'pair': 'eth_btc'}
        data = dump_json(data)
        await websocket.send(data)

        async for message in websocket:
            message = load_json(message)
            if 'ping' in message:
                data = {'action': 'pong', 'pong': message['ping']}
                await websocket.send(dump_json(data))
            else:
                print(message)
Example #31
def query_objectclass(inst, basedn, log, args):
    log = log.getChild('query_objectclass')
    schema = Schema(inst)
    # Need the query type
    oc = _get_arg(args.name, msg="Enter objectclass to query")
    result = schema.query_objectclass(oc, json=args.json)
    if args.json:
        print(dump_json(result, indent=4))
    else:
        print(result)
Example #32
def query_matchingrule(inst, basedn, log, args):
    log = log.getChild('query_matchingrule')
    schema = Schema(inst)
    # Need the query type
    attr = _get_arg(args.name, msg="Enter attribute to query")
    result = schema.query_matchingrule(attr, json=args.json)
    if args.json:
        print(dump_json(result, indent=4))
    else:
        print(result)
Example #33
def format_json(w_file, cursor, single=False, **kwargs):
    entries = []
    resource = {
        "meta": {},
        "data": entries,
        "jsonapi": {
            "version": "1.0"
        },
    }
    if cursor.label:
        resource['meta']['type'] = cursor.label

    for row in cursor:
        entries.append(row)

    if single and len(entries) < 1:
        raise errors.UnknownResource()
    elif single and len(entries) > 0:
        resource['data'] = entries[0]
    dump_json(resource, w_file, cls=ModelEncoder)
Example #34
    def __call__(self):
        year, month = self.yearMonth
        fn = b'{0}-{1}-posts-{2:4d}{3:02d}.json'.format(
            self.siteInfo.id, self.groupInfo.id, year, month)
        self.request.response.setHeader(b'Content-Disposition',
                                        b'inline; filename=' + fn)
        self.request.response.setHeader(b'Content-type', b'application/json')

        posts = self.query.posts_for_month(self.siteInfo.id, self.groupInfo.id,
                                           year, month)
        retval = dump_json(posts)
        return retval
Example #35
 def state(self):
     key = self.config.get('owntracks_key')
     res = dict(_type='location',
                tid='volvo',
                t='p',
                lat=self.vehicle.position['latitude'],
                lon=self.vehicle.position['longitude'],
                acc=1,
                tst=int(time()))
     return (dict(_type='encrypted',
                  data=owntracks_encrypt(dump_json(res), key))
             if key else res)
Example #36
 async def _http(self, callback, method, url,
                 params=None, json=None,
                 headers=None, files=None):
     data = None
     if files:
         data = files
     elif json:
         data = dump_json(json)
     async with self.request(method, url,
                             params=params,
                             headers=headers,
                             data=data) as response:
         body = await response.text(encoding='utf-8')
         return callback(Response(response.status, response.headers, body,
                                  method, url))
Example #37
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--root", required=True, help="Root path of Turbulenz Engine")
    parser.add_argument("--assets-path", required=True, action="append", help="Path to root of source assets")
    parser.add_argument("--build-path", default=path_join("_build", "assets"), help="Path for intermediate build files")
    parser.add_argument("--install-path", default="staticmax", help="Path to install output assets into")
    parser.add_argument("--verbose", action="store_true")
    parser.add_argument("--imagemagick-convert", help="Path to ImageMagick convert executable (enables TGA support)")

    args = parser.parse_args(argv[1:])

    assets_paths = [normpath(p) for p in args.assets_path]
    base_build_path = normpath(args.build_path)
    build_paths = [
        path_join(base_build_path, "textures"),
        path_join(base_build_path, "models"),
        path_join(base_build_path, "sounds"),
        path_join(base_build_path, "materials"),
        path_join(base_build_path, "shaders"),
        path_join(base_build_path, "fonts"),
        path_join(base_build_path, "videos"),
    ]
    create_dir(base_build_path)
    for path in build_paths:
        create_dir(path)

    create_dir(args.install_path)

    tools = Tools(args, base_build_path)

    with open("deps.yaml", "rt") as f:
        asset_build_info = load_yaml(f.read())
        if asset_build_info:
            asset_build_info = [AssetInfo(asset_info) for asset_info in asset_build_info]
        else:
            asset_build_info = []

    try:
        with open(path_join(base_build_path, "sourcehashes.json"), "rt") as f:
            source_list = SourceList(load_json(f.read()), assets_paths)
    except IOError:
        if args.verbose:
            print "No source hash file"
        source_list = SourceList({}, assets_paths)

    try:
        assets_rebuilt = 0
        for asset_info in asset_build_info:
            rebuild = build_asset(asset_info, source_list, tools, base_build_path, args.verbose)
            if rebuild:
                assets_rebuilt += 1
    except CalledProcessError as e:
        error("Tool failed - %s" % str(e))
        return 1
    except IOError as e:
        error(str(e))

    with open(path_join(base_build_path, "sourcehashes.json"), "wt") as f:
        f.write(dump_json(source_list.get_hashes()))

    print "Installing assets and building mapping table..."
    mapping = install(asset_build_info, args.install_path)

    with open("mapping_table.json", "wt") as f:
        f.write(dump_json({"urnmapping": mapping}))

    remove_old_build_files(asset_build_info, build_paths)

    print "%d assets rebuilt" % assets_rebuilt
    print "Assets build complete"
Example #38
 def post(self, url, json):
     return Response(self._client.post(url, data=dump_json(json), content_type='application/json'))
Example #39
 def render(self, controller, data, request, response):
     response.type = 'application/json'
     return dump_json(data)
Example #40
def main():
    parser = optparse.OptionParser(
        usage="Usage: %prog [options] course_code")

    parser.add_option('-v', '--verbose',
                      dest="verbose",
                      default=False,
                      action="store_true",
                      help="Print lots of output to stdout"
    )
    parser.add_option('--canvasid', dest='canvasid',
                      help="Canvas id for the course (or use lms api)"
    )
    parser.add_option('--dir', dest='dumpdir', default='dump',
                      help="Directory to read dumps from"
    )
    parser.add_option('--nop', dest='nop', default=False, action='store_true',
                      help="Only show canvas course for course round."
    )

    options, args = parser.parse_args()
    if options.canvasid and len(args) != 1:
        parser.error("Exactly one course_code is required when giving canvas id")
    elif len(args) == 0:
        parser.error("At least one course_code is required")

    for course_code in args:
        course_id = options.canvasid or find_canvas_id(course_code)
        if not course_id:
            print("Canvas course id not given or found")
            exit(1)
        dumpdir = options.dumpdir
        if options.verbose:
            print("Upload to %s (canvas #%s) from %s" % (
                course_code, course_id, dumpdir))
        if options.nop:
            continue

        course_code = course_code[:6]
        with open('%s/%s/pages.json' % (dumpdir, course_code)) as json:
            dumpdata = parse_json(json)

        uploaded_files = {}
        for data in dumpdata:
            if options.verbose:
                print("Should upload", data)

            # Use the Canvas API to insert the page
            #POST /api/v1/courses/:course_id/pages
            #    wiki_page[title]
            #    wiki_page[body]
            #    wiki_page[published]
            html = BeautifulSoup(open("%s/%s/pages/%s.html" % (dumpdir, course_code, data['slug'])), "html.parser")
            for link in html.findAll(href=True):
                linkdata = next(filter(lambda i: i['url'] == link['href'], data['links']), None)
                if linkdata and linkdata.get('category') == 'file':
                    canvas_url = uploaded_files.get(link['href'])
                    if not canvas_url:
                        canvas_url = create_file(course_id, '%s/%s/pages/%s' % (dumpdir, course_code, linkdata['url']),
                                                 basename(linkdata['url']))
                        print("Uploaded %s to %s for link" % (link['href'], canvas_url))
                        uploaded_files[link['href']] = canvas_url
                    else:
                        print("%s is allready at %s" % (link['href'], canvas_url))
                    link['href'] = canvas_url
                    linkdata['url'] = canvas_url

            for img in html.findAll('img'):
                imgdata = next(filter(lambda i: i['url'] == img.get('src'), data['links']), {})
                if imgdata.get('category') == 'file':
                    canvas_url = uploaded_files.get(img['src'])
                    if not canvas_url:
                        canvas_url = create_file(course_id, '%s/%s/pages/%s' % (dumpdir, course_code, imgdata['url']),
                                                 basename(imgdata['url']))
                        print("Uploaded %s to %s for img" % (img['src'], canvas_url))
                        uploaded_files[img['src']] = canvas_url
                    else:
                        print("%s is allready at %s" % (img['src'], canvas_url))
                    img['src'] = canvas_url
                    imgdata['url'] = canvas_url

            for tex in html.findAll('span', attrs={'role': 'formula', 'data-language': 'tex'}):
                img = html.new_tag('img')
                img['src'] = '/equation_images/' + urlquote(tex.text)
                img['alt'] = tex.text
                img['class'] = tex.get('class')
                tex.replace_with(img)
                if options.verbose:
                    print("Modified formula %s to: %s" % (tex, img))

            url = baseUrl + '%s/pages' % (course_id)
            print("Should post page to", url)
            payload={
                'wiki_page[title]': data['title'],
                'wiki_page[published]': False,
                'wiki_page[body]': str(html)
            }
            if options.verbose:
                print(payload)
            r = requests.post(url, headers = header, data=payload)
            if r.status_code == requests.codes.ok:
                page_response=r.json()
                if options.verbose:
                    print("result of post creating page: %s" % page_response)
                print("Uploaded page to %s" % page_response['html_url'])
                data['url'] = page_response['html_url']
            else:
                print("Failed to upload page %s: %s" % (data['title'], r))
        dumpname = '%s/%s/zzz-import-%s-%s.json' % (
            dumpdir, course_code, course_code, datetime.now().strftime('%Y%m%d-%H%M%S'))
        with open(dumpname, 'w') as json:
            dump_json(dumpdata, json, indent=4)
        result = create_file(course_id, dumpname, basename(dumpname))
        print('Uploaded final result to %s' % result)
Example #41
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--root', required=True, help="Root path of Turbulenz Engine")
    parser.add_argument('--assets-path', required=True, action='append', help="Path to root of source assets")
    parser.add_argument('--build-path', default=path_join('_build', 'assets'), help="Path for intermediate build files")
    parser.add_argument('--install-path', default='staticmax', help="Path to install output assets into")
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--imagemagick-convert', help="Path to ImageMagick convert executable (enables TGA support)")

    args = parser.parse_args(argv[1:])

    assets_paths = [ normpath(p) for p in args.assets_path ]
    base_build_path = normpath(args.build_path)
    build_paths = [
        path_join(base_build_path, 'textures'),
        path_join(base_build_path, 'models'),
        path_join(base_build_path, 'sounds'),
        path_join(base_build_path, 'materials'),
        path_join(base_build_path, 'shaders'),
        path_join(base_build_path, 'fonts'),
        path_join(base_build_path, 'videos'),
    ]
    create_dir(base_build_path)
    for path in build_paths:
        create_dir(path)

    create_dir(args.install_path)

    tools = Tools(args, base_build_path)

    with open('deps.yaml', 'rt') as f:
        asset_build_info = load_yaml(f.read())
        if asset_build_info:
            asset_build_info = [AssetInfo(asset_info) for asset_info in asset_build_info]
        else:
            asset_build_info = []

    try:
        with open(path_join(base_build_path, 'sourcehashes.json'), 'rt') as f:
            source_list = SourceList(load_json(f.read()), assets_paths)
    except IOError:
        if args.verbose:
            print 'No source hash file'
        source_list = SourceList({}, assets_paths)

    try:
        assets_rebuilt = 0
        for asset_info in asset_build_info:
            rebuild = build_asset(asset_info, source_list, tools, base_build_path, args.verbose)
            if rebuild:
                assets_rebuilt += 1
    except CalledProcessError as e:
        error('Tool failed - %s' % str(e))
        return 1
    except IOError as e:
        error(str(e))

    with open(path_join(base_build_path, 'sourcehashes.json'), 'wt') as f:
        f.write(dump_json(source_list.get_hashes()))

    print 'Installing assets and building mapping table...'
    mapping = install(asset_build_info, args.install_path)

    with open('mapping_table.json', 'wt') as f:
        f.write(dump_json({
                'urnmapping': mapping
            }))

    remove_old_build_files(asset_build_info, build_paths)

    print '%d assets rebuilt' % assets_rebuilt
    print 'Assets build complete'
Example #42
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--root', required=True, help="Root path of Turbulenz Engine")
    parser.add_argument('--assets-path', required=True, action='append', help="Path to root of source assets")
    parser.add_argument('--build-path', default=path_join('_build', 'assets'), help="Path for intermediate build files")
    parser.add_argument('--install-path', default='staticmax', help="Path to install output assets into")
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--imagemagick-convert', help="Path to ImageMagick convert executable (enables TGA support)")
    try:
        default_num_threads = multiprocessing.cpu_count()
    except NotImplementedError:
        default_num_threads = 1
    parser.add_argument('-j', '--num-threads', help="Specify how many threads to use for building",
                        default=default_num_threads, type=int)

    args = parser.parse_args(argv[1:])

    assets_paths = [ normpath(p) for p in args.assets_path ]
    base_build_path = normpath(args.build_path)
    create_dir(base_build_path)
    create_dir(args.install_path)

    tools = Tools(args, base_build_path)

    with open('deps.yaml', 'r') as f:
        asset_build_info = load_yaml(f.read())
        if asset_build_info:
            asset_build_info = [AssetInfo(asset_info) for asset_info in asset_build_info]
        else:
            asset_build_info = []

    try:
        with open(path_join(base_build_path, 'sourcehashes.json'), 'r') as f:
            source_list = SourceList(load_json(f.read()), assets_paths)
    except IOError:
        if args.verbose:
            print 'No source hash file'
        source_list = SourceList({}, assets_paths)

    # Ensure all sources are in the source list so that the threads aren't writing to the list
    for a in asset_build_info:
        source_list.get_source(a.path)

    class AssetBuildThread(Thread):
        def __init__(self, asset_list, asset_list_mutex):
            Thread.__init__(self)
            self.asset_list = asset_list
            self.mutex = asset_list_mutex
            self.assets_rebuilt = 0
            self.exit = False
            self.error = None

        def run(self):
            while True:
                if self.exit:
                    return 0
                self.mutex.acquire(True)
                try:
                    # Try and pull the head off the list and if all its dependencies are already built then
                    # build it. This could iterate down the remaining list in case the head isn't buildable but
                    # things later in the list are
                    asset_info = self.asset_list[0]
                    deps = [ source_list.get_source(path) for path in asset_info.deps if path != asset_info.path ]
                    if any([not d.built for d in deps]):
                        self.mutex.release()
                        sleep(0.01)
                        continue
                    self.asset_list.pop(0)
                    self.mutex.release()
                except IndexError:
                    self.mutex.release()
                    return 0
                try:
                    rebuild = build_asset(asset_info, source_list, tools, base_build_path, args.verbose)
                except CalledProcessError as e:
                    self.error = '%s - Tool failed - %s' % (asset_info.path, str(e))
                    return 1
                except IOError as e:
                    self.error = str(e)
                    return 1

                if rebuild:
                    self.assets_rebuilt += 1

    num_threads = args.num_threads

    # Sort assets by dependencies
    assets_to_build = []
    while len(assets_to_build) != len(asset_build_info):
        num_assets_sorted = len(assets_to_build)
        for asset in asset_build_info:
            if asset in assets_to_build:
                continue
            for dep in asset.deps:
                if dep != asset.path and dep not in [ a.path for a in assets_to_build ]:
                    break
            else:
                assets_to_build.append(asset)
        if num_assets_sorted == len(assets_to_build):
            assets_left = [ a for a in asset_build_info if a not in assets_to_build ]
            error('Detected cyclic dependencies between assets within - \n%s' %
                '\n'.join([ a.path for a in assets_left ]))
            return 1


    # Create and start threads to build the assets in the sorted dependency list
    asset_threads = []
    asset_list_mutex = Lock()
    for t in xrange(num_threads):
        asset_threads.append(AssetBuildThread(assets_to_build, asset_list_mutex))

    for t in xrange(num_threads):
        asset_threads[t].start()

    while any(a.isAlive() for a in asset_threads):
        for t in xrange(num_threads):
            asset_threads[t].join(0.1)
            if not asset_threads[t].isAlive() and asset_threads[t].error:
                # One thread has an error ask all the others to finish asap
                for o in xrange(num_threads):
                    asset_threads[o].exit = True

    # Update the stats on number of assets rebuilt
    assets_rebuilt = 0
    for t in xrange(num_threads):
        assets_rebuilt += asset_threads[t].assets_rebuilt

    # Dump the state of the build for partial rebuilds
    with open(path_join(base_build_path, 'sourcehashes.json'), 'w') as f:
        f.write(dump_json(source_list.get_hashes()))

    # Check if any build threads failed and if so exit with an error
    for t in xrange(num_threads):
        if asset_threads[t].error:
            error(asset_threads[t].error)
            return 1

    # Dump the mapping table for the built assets
    print 'Installing assets and building mapping table...'
    mapping = install(asset_build_info, args.install_path)
    with open('mapping_table.json', 'w') as f:
        f.write(dump_json({
                'urnmapping': mapping
            }))

    # Cleanup any built files no longer referenced by the new mapping table
    remove_old_build_files(asset_build_info, base_build_path)

    print '%d assets rebuilt' % assets_rebuilt
    print 'Assets build complete'
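The "Sort assets by dependencies" loop above is a fixed-point topological sort: each pass appends every asset whose dependencies are already placed, and a pass that makes no progress means a cycle. A standalone sketch of the same idea (Asset is a hypothetical stand-in for AssetInfo):

from collections import namedtuple

Asset = namedtuple('Asset', ['path', 'deps'])

def sort_by_deps(assets):
    ordered, placed = [], set()
    while len(ordered) != len(assets):
        progressed = False
        for a in assets:
            if a.path in placed:
                continue
            # Self-references are ignored, matching the loop above.
            if all(d == a.path or d in placed for d in a.deps):
                ordered.append(a)
                placed.add(a.path)
                progressed = True
        if not progressed:
            raise ValueError('cyclic dependencies between: %s'
                             % [a.path for a in assets if a.path not in placed])
    return ordered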
Example #43
def json_stringify(data):
    """
    :type data: object
    :rtype: str
    """
    return dump_json(data, sort_keys=True, separators=(',', ':')).encode("utf-8") if data else None
Example #44
 def get(self, q):
     q = urllib2.unquote(q)
     self.write(dump_json(list(P.q_search.search(query=q))))
Example #45
 def get(self, q):
     q = urllib2.unquote(q)
     self.write(dump_json(list(P.a_search.search(answer=q))))
Example #46
 def get(self, q):
     q = urllib2.unquote(q)
     queries = list(P.q_search.search(query=q))
     answers = list(P.a_search.search(answer=q))
     self.write(dump_json(queries + answers))
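Assuming these last three handlers are Tornado handlers (self.write with no explicit response object), the lists are serialized explicitly because Tornado's RequestHandler.write will JSON-encode a dict automatically but refuses bare lists as a guard against JSON array hijacking, so dump_json is applied before writing.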