def register(self, url, on_signal, on_data, id_=None, name=None,
             idf_list=None, odf_list=None, accept_protos=None,
             profile=None, register_callback=None, on_register=None,
             on_deregister=None, on_connect=None, on_disconnect=None):
    '''
    Register to an IoTtalk server.

    :param url: the URL of the IoTtalk server
    :param on_signal: the signal handler
    :param on_data: the data handler
    :param id_: the UUID used to identify an application. If not provided,
                this function generates one and returns it.
    :param name: the name of the application
    :param idf_list: the Input Device Feature list of the application.
                     Every element should be a tuple with the feature name
                     and unit information provided, e.g. ``('meow', ['dB'])``
    :param odf_list: the Output Device Feature list of the application.
    :param accept_protos: the protocols accepted by the application.
                          The default is ``['mqtt']``.
    :param profile: an arbitrary JSON data field
    :param on_register: the callable invoked when the registration succeeds.
    :param register_callback: deprecated, please use ``on_register`` instead.
    :param on_deregister: the callable invoked when the deregistration succeeds.
    :param on_connect: the callable invoked when the MQTT client connects.
                       Note that this function might be called multiple times
                       if the client keeps reconnecting.
    :param on_disconnect: the callable invoked when the MQTT client disconnects.
                          Note that this function might be called multiple
                          times if the client loses the connection.
    :type url: str
    :type on_signal: Function
    :type on_data: Function
    :type id_: str
    :type name: str
    :type idf_list: List[Tuple[str, List[str]]]
    :type odf_list: List[Tuple[str, List[str]]]
    :type accept_protos: List[str]
    :type profile: dict
    :returns: the JSON object responded by the server if the registration succeeds
    :raises RegistrationError: if already registered or the registration failed
    '''
    ctx = self.context

    if ctx.mqtt_client:
        raise RegistrationError('Already registered')

    ctx.url = url
    if _invalid_url(ctx.url):
        raise RegistrationError('Invalid url: "{}"'.format(ctx.url))

    try:
        ctx.app_id = UUID(id_) if id_ else uuid4()
    except ValueError:
        raise RegistrationError('Invalid UUID: {!r}'.format(id_))

    body = {}
    if name:
        body['name'] = name
    if idf_list:
        body['idf_list'] = idf_list
    if odf_list:
        body['odf_list'] = odf_list
    body['accept_protos'] = accept_protos if accept_protos else ['mqtt']
    if profile:
        body['profile'] = profile

    _reg_msg = 'register_callback is deprecated, please use `on_register` instead.'
    if on_register and register_callback:
        raise RegistrationError(_reg_msg)
    elif on_register:
        ctx.on_register = on_register
    elif register_callback:
        log.warning(_reg_msg)
        ctx.on_register = register_callback

    # other callbacks
    ctx.on_deregister = on_deregister
    ctx.on_connect = on_connect
    ctx.on_disconnect = on_disconnect

    try:
        response = requests.put(
            '{}/{}'.format(ctx.url, ctx.app_id),
            headers={
                'Content-Type': 'application/json',
            },
            data=json.dumps(body)
        )

        if response.status_code != 200:
            raise RegistrationError(response.json()['reason'])
    except requests.exceptions.ConnectionError:
        raise RegistrationError('ConnectionError')
    except (KeyError, json.JSONDecodeError):
        raise RegistrationError('Invalid response from server')

    metadata = response.json()
    ctx.name = metadata['name']
    ctx.mqtt_host = metadata['url']['host']
    ctx.mqtt_port = metadata['url']['port']
    ctx.i_chans['ctrl'] = metadata['ctrl_chans'][0]
    ctx.o_chans['ctrl'] = metadata['ctrl_chans'][1]
    ctx.rev = rev = metadata['rev']

    ctx.mqtt_client = mqtt.Client(client_id='iottalk-py-{}'.format(uuid4().hex))
    ctx.mqtt_client.on_message = self._on_message
    ctx.mqtt_client.on_connect = self._on_connect
    ctx.mqtt_client.on_disconnect = self._on_disconnect
    ctx.mqtt_client.enable_logger(log)
    ctx.mqtt_client.will_set(
        self.context.i_chans['ctrl'],
        json.dumps({'state': 'offline', 'rev': rev}),
        retain=True,
    )
    ctx.mqtt_client.connect(
        self.context.mqtt_host,
        port=self.context.mqtt_port,
    )
    ctx.mqtt_client.loop_start()

    ctx.on_signal = on_signal
    ctx.on_data = on_data

    try:
        msg = ctx._mqueue.get(timeout=5)
        msg.wait_for_publish()
    except queue.Empty:
        log.error('MQTT connection timeout')
        raise
    log.debug('Online message published')

    if ctx.on_register:
        ctx.on_register(self)

    return ctx
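
# Hedged usage sketch (not part of the source): handler signatures below are
# inferred from the docstring; `client` stands for whatever object exposes
# register() and is a hypothetical name, as is the server URL.

def demo_on_signal(command, df_list):
    # acknowledge control signals; returning True reports success
    return True

def demo_on_data(df_name, data):
    # receive pushed output-device-feature data; this sketch ignores it
    pass

# client.register(
#     'http://localhost:9992',          # hypothetical IoTtalk server URL
#     on_signal=demo_on_signal,
#     on_data=demo_on_data,
#     name='demo-app',
#     idf_list=[('meow', ['dB'])],      # shape from the docstring example
#     accept_protos=['mqtt'],
# )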
def listImages(self, project, imageAddedRange=None, lastViewedRange=None,
               viewcountRange=None, numAnnoRange=None, numPredRange=None,
               orderBy=None, order='desc', startFrom=None, limit=None):
    '''
    Returns a list of images, with ID, filename, date image was added,
    viewcount, number of annotations, number of predictions, and last
    time viewed, for a given project.
    The list can be filtered by all those properties (e.g. date and time
    image was added, last checked; number of annotations, etc.), as well
    as limited in length (images are sorted by date_added).
    '''
    queryArgs = []

    filterStr = ''
    if imageAddedRange is not None:  # TODO
        filterStr += ' date_added >= to_timestamp(%s) AND date_added <= to_timestamp(%s) '
        queryArgs.append(imageAddedRange[0])
        queryArgs.append(imageAddedRange[1])
    if lastViewedRange is not None:  # TODO
        filterStr += 'AND last_viewed >= to_timestamp(%s) AND last_viewed <= to_timestamp(%s) '
        queryArgs.append(lastViewedRange[0])
        queryArgs.append(lastViewedRange[1])
    if viewcountRange is not None:
        filterStr += 'AND viewcount >= %s AND viewcount <= %s '
        queryArgs.append(viewcountRange[0])
        queryArgs.append(viewcountRange[1])
    if numAnnoRange is not None:
        filterStr += 'AND num_anno >= %s AND num_anno <= %s '
        queryArgs.append(numAnnoRange[0])
        queryArgs.append(numAnnoRange[1])
    if numPredRange is not None:
        filterStr += 'AND num_pred >= %s AND num_pred <= %s '
        queryArgs.append(numPredRange[0])
        queryArgs.append(numPredRange[1])
    if startFrom is not None:
        if not isinstance(startFrom, UUID):
            try:
                startFrom = UUID(startFrom)
            except Exception:
                startFrom = None
        if startFrom is not None:
            filterStr += ' AND img.id > %s '
            queryArgs.append(startFrom)
    filterStr = filterStr.strip()
    if filterStr.startswith('AND'):
        filterStr = filterStr[3:]
    if len(filterStr.strip()):
        filterStr = 'WHERE ' + filterStr
    filterStr = sql.SQL(filterStr)

    orderStr = sql.SQL('ORDER BY img.id ASC')
    if orderBy is not None:
        orderStr = sql.SQL('ORDER BY {} {}, img.id ASC').format(
            sql.SQL(orderBy), sql.SQL(order))

    limitStr = sql.SQL('')
    if isinstance(limit, int):
        limitStr = sql.SQL('LIMIT %s')
        queryArgs.append(limit)

    if not len(queryArgs):
        queryArgs = None

    queryStr = sql.SQL('''
        SELECT img.id, filename,
            EXTRACT(epoch FROM date_added) AS date_added,
            COALESCE(viewcount, 0) AS viewcount,
            EXTRACT(epoch FROM last_viewed) AS last_viewed,
            COALESCE(num_anno, 0) AS num_anno,
            COALESCE(num_pred, 0) AS num_pred,
            img.isGoldenQuestion
        FROM {id_img} AS img
        FULL OUTER JOIN (
            SELECT image, COUNT(*) AS viewcount, MAX(last_checked) AS last_viewed
            FROM {id_iu}
            GROUP BY image
        ) AS iu
        ON img.id = iu.image
        FULL OUTER JOIN (
            SELECT image, COUNT(*) AS num_anno
            FROM {id_anno}
            GROUP BY image
        ) AS anno
        ON img.id = anno.image
        FULL OUTER JOIN (
            SELECT image, COUNT(*) AS num_pred
            FROM {id_pred}
            GROUP BY image
        ) AS pred
        ON img.id = pred.image
        {filter}
        {order}
        {limit}
    ''').format(id_img=sql.Identifier(project, 'image'),
                id_iu=sql.Identifier(project, 'image_user'),
                id_anno=sql.Identifier(project, 'annotation'),
                id_pred=sql.Identifier(project, 'prediction'),
                filter=filterStr,
                order=orderStr,
                limit=limitStr)

    result = self.dbConnector.execute(
        queryStr, tuple(queryArgs) if queryArgs is not None else None, 'all')
    for idx in range(len(result)):
        result[idx]['id'] = str(result[idx]['id'])
    return result
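
# A minimal, self-contained sketch (not from the source) of the psycopg2.sql
# composition pattern used above: identifiers such as schema and table names are
# quoted via sql.Identifier, while values still travel separately as %s
# parameters. The function and table names here are illustrative.
from psycopg2 import sql

def build_count_query(project):
    # sql.Identifier(project, 'image') renders as "project"."image", safely quoted
    return sql.SQL('SELECT COUNT(*) FROM {tbl} WHERE viewcount >= %s').format(
        tbl=sql.Identifier(project, 'image'))

# cursor.execute(build_count_query('my_project'), (5,))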
def test_can_insert_model_with_all_column_types(self):
    """
    Test for inserting all column types into a Model

    test_can_insert_model_with_all_column_types tests that each cqlengine
    column type can be inserted into a Model. It first creates a Model that
    has each cqlengine column type. It then creates a Model instance where
    all the fields have corresponding data, which performs the insert into
    the Cassandra table. Finally, it verifies that each column read from the
    Model from Cassandra is the same as the input parameters.

    @since 2.6.0
    @jira_ticket PYTHON-246
    @expected_result The Model is inserted with each column type, and the
    resulting read yields proper data for each column.

    @test_category data_types:primitive
    """

    class AllDatatypesModel(Model):
        id = columns.Integer(primary_key=True)
        a = columns.Ascii()
        b = columns.BigInt()
        c = columns.Blob()
        d = columns.Boolean()
        e = columns.DateTime()
        f = columns.Decimal()
        g = columns.Double()
        h = columns.Float()
        i = columns.Inet()
        j = columns.Integer()
        k = columns.Text()
        l = columns.TimeUUID()
        m = columns.UUID()
        n = columns.VarInt()

    sync_table(self.conn, AllDatatypesModel)

    input = [
        'ascii', 2 ** 63 - 1, bytearray(b'hello world'), True,
        datetime.utcfromtimestamp(872835240), Decimal('12.3E+7'), 2.39,
        3.4028234663852886e+38, '123.123.123.123', 2147483647, 'text',
        UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'),
        UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'),
        int(str(2147483647) + '000')
    ]

    AllDatatypesModel.create(
        self.conn, id=0, a='ascii', b=2 ** 63 - 1,
        c=bytearray(b'hello world'), d=True,
        e=datetime.utcfromtimestamp(872835240), f=Decimal('12.3E+7'),
        g=2.39, h=3.4028234663852886e+38, i='123.123.123.123',
        j=2147483647, k='text',
        l=UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'),
        m=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'),
        n=int(str(2147483647) + '000'),
    )

    self.assertEqual(1, AllDatatypesModel.objects.count(self.conn))
    output = AllDatatypesModel.objects().first(self.conn)

    for i, i_char in enumerate(range(ord('a'), ord('a') + 14)):
        self.assertEqual(input[i], output[chr(i_char)])
"datetime": datetime.datetime(2020, 4, 9, 14, 19, 24, 832974), "date": datetime.date(2020, 4, 9), "time": datetime.time(14, 19, 24, 832974), "timedelta": datetime.timedelta(seconds=1293, microseconds=124215), }, { "datetime": datetime.datetime(2021, 5, 10, 15, 20, 25, 832856), "date": datetime.date(2021, 5, 10), "time": datetime.time(15, 20, 25, 832856), "timedelta": datetime.timedelta(microseconds=847692), }, ), "dict_complex": { 38632423: { "aaa": { UUID("ac7e231f-7ea1-43ef-a44a-c6e22ac72a87"): [ Decimal("383.23"), Decimal("2432.324"), Decimal("9023.234"), Decimal("93890.234"), Decimal("324963.324"), ], UUID("4c1a575a-66a6-4e7e-b747-18069127f31c"): [ Decimal("7634.34"), Decimal("4813.4862"), Decimal("99230.373"), Decimal("384.9634"), Decimal("567.3934"), ], }, "bbb": {
class Move_Overlayed_Nodes(DiTToLayerBase):
    name = "move_overlayed_nodes"
    uuid = UUID("31f61c51-a89a-419c-80ba-23d4b89730eb")
    version = '0.1.0'
    desc = "Jiggle the coordinate positions of nodes that are on top of each other"

    @classmethod
    def args(cls, model=None):
        arg_list = super().args()
        return arg_list

    @classmethod
    def kwargs(cls, model=None):
        kwarg_dict = super().kwargs()
        kwarg_dict['delta_x'] = Kwarg(default=None, description='Shift in the coordinates X position',
                                      parser=None, choices=None, nargs=None, action=None)
        kwarg_dict['delta_y'] = Kwarg(default=None, description='Shift in the coordinates Y position',
                                      parser=None, choices=None, nargs=None, action=None)
        return kwarg_dict

    @classmethod
    def apply(cls, stack, model, *args, **kwargs):
        random.seed(0)
        delta_x = 1  # use 1 as a default
        delta_y = 0  # use 0 as a default
        if 'delta_x' in kwargs:
            delta_x = kwargs['delta_x']
        if 'delta_y' in kwargs:
            delta_y = kwargs['delta_y']

        all_positions = {}
        all_positions_intermediate = {}
        for m in model.models:
            if hasattr(m, 'name') and m.name is not None and hasattr(m, 'positions') and m.positions is not None:
                if len(m.positions) == 1:
                    # only look at elements with exactly one position attribute
                    pos = (float(round(m.positions[0].lat)), float(round(m.positions[0].long)))
                    if pos in all_positions:
                        all_positions[pos].append(m.name)
                    else:
                        all_positions[pos] = [m.name]
                if len(m.positions) > 2:
                    # intermediate nodes
                    to_remove = set()
                    for i in range(1, len(m.positions)):
                        pos = (float(round(m.positions[i].lat)), float(round(m.positions[i].long)))  # x2, y2
                        pos_prev = (float(round(m.positions[i - 1].lat)), float(round(m.positions[i - 1].long)))  # x1, y1
                        norm = ((pos[0] - pos_prev[0]) ** 2 + (pos[1] - pos_prev[1]) ** 2) ** 0.5
                        if norm != 0:
                            # perpendicular unit vector, so that the shift keeps the lines parallel
                            perp = ((pos[1] - pos_prev[1]) / norm, (pos_prev[0] - pos[0]) / norm)
                            if pos in all_positions_intermediate:
                                # add the position object, not the name, as position objects exist without names
                                all_positions_intermediate[pos].append((m.positions[i], perp))
                            else:
                                all_positions_intermediate[pos] = [(m.positions[i], perp)]
                        else:
                            to_remove.add(i)
                    new_positions = []
                    for i in range(1, len(m.positions)):
                        if i not in to_remove:
                            new_positions.append(m.positions[i])
                    m.positions = new_positions

        model.build_networkx('st_mat')
        attrs = nx.get_edge_attributes(model._network.graph, 'name')
        all_neighboring_lines = set()
        for pos in all_positions:
            if len(all_positions[pos]) > 1:
                path = all_positions[pos]
                subgraph = model._network.graph.subgraph(path)
                spring = graphviz_layout(subgraph)
                for i in range(len(path)):
                    factor = 1
                    delta_x = 0.065
                    delta_y = 0.065
                    model[path[i]].positions[0].lat += spring[path[i]][0] * delta_x * factor
                    model[path[i]].positions[0].long += spring[path[i]][1] * delta_y * factor
                for j in subgraph.edges():
                    if j not in attrs:
                        i = (j[1], j[0])
                    else:
                        i = j
                    if i in attrs and attrs[i] in model.model_names and isinstance(model[attrs[i]], Line):
                        model[attrs[i]].positions = []

        # do at the end to avoid double counting
        for tmp_line in all_neighboring_lines:
            if tmp_line.positions is not None and len(tmp_line.positions) > 0:
                tmp_line.positions = tmp_line.positions[:-1]

        for pos in all_positions_intermediate:
            if len(all_positions_intermediate[pos]) > 1:
                path = all_positions_intermediate[pos]
                for i in range(len(path)):
                    m = path[i][0]
                    perp = path[i][1]
                    m.lat += perp[0] * i
                    m.long += perp[1] * i

        model._network.graph = None  # so these layouts don't remain permanent
        return model
def guid_str_from_bytes(guid_bytes, endian='le'):
    # Windows-style GUIDs store the first three fields little-endian;
    # bytes_le handles that swap, plain bytes is big-endian throughout.
    if endian.lower() == 'le':
        return str(UUID(bytes_le=guid_bytes))
    else:
        return str(UUID(bytes=guid_bytes))
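
# Hedged round-trip check (not from the source) showing why the endian flag
# matters: the same 16 bytes parse differently depending on whether the first
# three GUID fields are read little-endian or big-endian. Pure stdlib.
from uuid import UUID

_u = UUID('12345678-1234-5678-1234-567812345678')
assert guid_str_from_bytes(_u.bytes_le, endian='le') == str(_u)
assert guid_str_from_bytes(_u.bytes, endian='be') == str(_u)
# mixing the conventions scrambles time_low/time_mid/time_hi:
assert guid_str_from_bytes(_u.bytes, endian='le') != str(_u)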
def get_duplicates(self, domain, xform_id):
    if domain not in self.dups_by_domain:
        self.populate_dup_map(domain)
    return self.dups_by_domain[domain].get(UUID(xform_id), {})
import datetime
from decimal import Decimal
from enum import Enum
from uuid import UUID

import pytest

from pydantic import BaseModel, create_model
from pydantic.json import pydantic_encoder, timedelta_isoformat


class MyEnum(Enum):
    foo = 'bar'
    snap = 'crackle'


@pytest.mark.parametrize('input,output', [
    (UUID('ebcdab58-6eb8-46fb-a190-d07a33e9eac8'), '"ebcdab58-6eb8-46fb-a190-d07a33e9eac8"'),
    (datetime.datetime(2032, 1, 1, 1, 1), '"2032-01-01T01:01:00"'),
    (datetime.datetime(2032, 1, 1, 1, 1, tzinfo=datetime.timezone.utc), '"2032-01-01T01:01:00+00:00"'),
    (datetime.datetime(2032, 1, 1), '"2032-01-01T00:00:00"'),
    (datetime.time(12, 34, 56), '"12:34:56"'),
    (datetime.timedelta(days=12, seconds=34, microseconds=56), '1036834.000056'),
    ({1, 2, 3}, '[1, 2, 3]'),
    (frozenset([1, 2, 3]), '[1, 2, 3]'),
    ((v for v in range(4)), '[0, 1, 2, 3]'),
    (b'this is bytes', '"this is bytes"'),
    (Decimal('12.34'), '12.34'),
    (create_model('BarModel', a='b', c='d')(), '{"a": "b", "c": "d"}'),
    (MyEnum.foo, '"bar"'),
import textwrap
from pathlib import Path
from typing import Dict
from uuid import UUID

import pytest

from mtg_ssm.containers import counts
from mtg_ssm.containers.bundles import ScryfallDataSet
from mtg_ssm.containers.collection import MagicCollection
from mtg_ssm.containers.counts import ScryfallCardCount
from mtg_ssm.containers.indexes import Oracle
from mtg_ssm.scryfall.models import ScryCard
from mtg_ssm.serialization import csv

TEST_CARD_ID = UUID("57f25ead-b3ec-4c40-972d-d750ed2f5319")


@pytest.fixture(scope="session")
def oracle(scryfall_data: ScryfallDataSet) -> Oracle:
    """Test fixture Oracle with limited data."""
    accepted_sets = {"phop", "pmbs"}
    scryfall_data2 = ScryfallDataSet(
        sets=[s for s in scryfall_data.sets if s.code in accepted_sets],
        cards=[c for c in scryfall_data.cards if c.set in accepted_sets],
    )
    return Oracle(scryfall_data2)


def test_header() -> None:
    assert csv.CSV_HEADER == [
def test_from_string_constructor(self):
    uniqueId = str(uuid.uuid4())
    userid = UserId.from_string(uniqueId)
    self.assertEqual(userid.value, UUID(uniqueId))
    help='UUID for split',
    default=None,
)
parser.add_argument(
    '--hyperparameters',
    type=str,
    help='hyperparameters file',
    required=True,
)
parser.add_argument(
    '--trials',
    type=int,
    default=config.TRIALS,
    help='how many times to run')

FLAGS, unparsed = parser.parse_known_args()

with open(FLAGS.hyperparameters) as f:
    parameters = json.load(f)
parameters = explode_parameters(parameters)
model = models[FLAGS.model]

split = FLAGS.split
if split is None:
    split = uuid4()
else:
    split = UUID(split)

training, validation, test = data(split, input_form=FLAGS.form, label_form=FLAGS.label)
for _ in range(FLAGS.trials):
    for hyperparameters in parameters:
        run(model, FLAGS.description, FLAGS.form, FLAGS.label, split,
            loaded_data=(training, validation, test),
            hyperparameters=hyperparameters)
        K.clear_session()

print('The split id for this run ' + FLAGS.description + ' is ' + str(split))
def __init__(self, name=None, project_id=None, path=None, controller=None, status="opened",
             filename=None, auto_start=False, auto_open=False, auto_close=True,
             scene_height=1000, scene_width=2000, zoom=100, show_layers=False,
             snap_to_grid=False, show_grid=False, grid_size=75, drawing_grid_size=25,
             show_interface_labels=False, variables=None, supplier=None):

    self._controller = controller
    assert name is not None
    self._name = name
    self._auto_start = auto_start
    self._auto_close = auto_close
    self._auto_open = auto_open
    self._status = status
    self._scene_height = scene_height
    self._scene_width = scene_width
    self._zoom = zoom
    self._show_layers = show_layers
    self._snap_to_grid = snap_to_grid
    self._show_grid = show_grid
    self._grid_size = grid_size
    self._drawing_grid_size = drawing_grid_size
    self._show_interface_labels = show_interface_labels
    self._variables = variables
    self._supplier = supplier
    self._loading = False
    self._closing = False

    # Disallow overwrite of an existing project
    if project_id is None and path is not None:
        if os.path.exists(path):
            raise aiohttp.web.HTTPForbidden(
                text="The path {} already exists.".format(path))

    if project_id is None:
        self._id = str(uuid4())
    else:
        try:
            UUID(project_id, version=4)
        except ValueError:
            raise aiohttp.web.HTTPBadRequest(
                text="{} is not a valid UUID".format(project_id))
        self._id = project_id

    if path is None:
        path = os.path.join(get_default_project_directory(), self._id)
    self.path = path

    if filename is not None:
        self._filename = filename
    else:
        self._filename = self.name + ".gns3"

    self.reset()

    # At project creation we write an empty .gns3 file with the metadata
    if not os.path.exists(self._topology_file()):
        assert self._status != "closed"
        self.dump()

    self._iou_id_lock = asyncio.Lock()

    log.debug('Project "{name}" [{id}] loaded'.format(name=self.name, id=self._id))
class OneListTierID(Enum):
    """One List tier IDs."""

    tier_d_international_trade_advisers = UUID('1929c808-99b4-4abf-a891-45f2e187b410')
def _convert_uuid_string_to_int(uuid_string):
    """Return the integer representation of a UUID string."""
    return UUID(uuid_string).int
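
# Hedged companion sketch (not from the source): .int is lossless, so the
# conversion inverts with UUID(int=...). The helper name below is hypothetical,
# stdlib only.
def _convert_int_to_uuid_string(uuid_int):
    """Return the canonical hex form of a 128-bit UUID integer."""
    return str(UUID(int=uuid_int))

assert _convert_int_to_uuid_string(
    _convert_uuid_string_to_int('12345678-1234-5678-1234-567812345678')
) == '12345678-1234-5678-1234-567812345678'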
async def onJoin(self, details):
    self.log.info('{klass}.onJoin(details={details})',
                  klass=self.__class__.__name__, details=details)
    try:
        assert details.authrole == 'member'

        # WAMP authid on xbrnetwork follows this format: "member-<member_id>"
        member_id = details.authid[7:]
        member_id = UUID(member_id)

        member_data = await self.call('xbr.network.get_member', member_id.bytes)
        member_adr = member_data['address']

        # delegate ethereum private key object
        wallet_key = self._ethkey
        wallet_raw = self._ethkey_raw

        # delegate ethereum account canonical address
        wallet_adr = wallet_key.public_key.to_canonical_address()

        config = await self.call('xbr.network.get_config')
        status = await self.call('xbr.network.get_status')

        verifyingChain = config['verifying_chain_id']
        verifyingContract = binascii.a2b_hex(config['verifying_contract_adr'][2:])

        created = status['block']['number']

        # FIXME: where to get?
        terms_hash = config['eula']['hash']
        catalog_id = uuid.uuid4().bytes

        # https://ipfs.infura.io:5001/api/v0/cat?arg=QmenatrFHG1m5YSsRbCmoLyvVU4JX1fkbr9X7XM8FFSr1h
        meta_hash = 'QmenatrFHG1m5YSsRbCmoLyvVU4JX1fkbr9X7XM8FFSr1h'

        signature = sign_eip712_catalog_create(wallet_raw, verifyingChain, verifyingContract,
                                               wallet_adr, created, catalog_id, terms_hash,
                                               meta_hash)

        # https://xbr.network/docs/network/api.html#xbrnetwork.XbrNetworkApi.onboard_member
        try:
            result = await self.call('xbr.network.create_catalog', member_id.bytes, catalog_id,
                                     verifyingChain, created, verifyingContract, terms_hash,
                                     meta_hash, None, signature, None)
            self.log.info("create_catalog results:\n\n{result}\n", result=pformat(result))
        except ApplicationError as e:
            self.log.error('ApplicationError: {error}', error=e)
            self.leave('wamp.error', str(e))
            return
        except Exception as e:
            raise e

        assert type(result) == dict
        assert 'created' in result and type(result['created']) == int and result['created'] > 0
        assert 'action' in result and result['action'] == 'create_catalog'
        assert 'vaction_oid' in result and type(result['vaction_oid']) == bytes and \
               len(result['vaction_oid']) == 16

        vaction_oid = uuid.UUID(bytes=result['vaction_oid'])
        self.log.info('Create catalog - verification "{vaction_oid}" created',
                      vaction_oid=vaction_oid)

        # fd = 'cloud/planet_xbr_crossbar/.crossbar/.verifications'
        fd = self._verifications
        if not os.path.isdir(fd):
            os.mkdir(fd)
        fn = 'create-catalog-email-verification.{}'.format(vaction_oid)
        verification_file = os.path.abspath(os.path.join(fd, fn))
        with open(verification_file, 'rb') as f:
            data = f.read()
        verified_data = cbor2.loads(data)

        self.log.info('Verified data:\n{verified_data}\n', verified_data=pformat(verified_data))

        vaction_code = verified_data['vcode']

        self.log.info('Verifying member using vaction_oid={vaction_oid}, vaction_code={vaction_code} ..',
                      vaction_oid=vaction_oid, vaction_code=vaction_code)

        try:
            result = await self.call('xbr.network.verify_create_catalog',
                                     vaction_oid.bytes, vaction_code)
        except ApplicationError as e:
            self.log.error('ApplicationError: {error}', error=e)
            raise e

        assert type(result) == dict
        assert 'member_oid' in result and type(result['member_oid']) == bytes and \
               len(result['member_oid']) == 16
        assert 'catalog_oid' in result and type(result['catalog_oid']) == bytes and \
               len(result['catalog_oid']) == 16

        catalog_oid = result['catalog_oid']
        member_oid = result['member_oid']
        self.log.info('SUCCESS! New XBR FbsRepository created: member_oid={member_oid}, '
                      'catalog_oid={catalog_id}, result: {result}\n',
                      member_oid=uuid.UUID(bytes=member_oid).__str__(),
                      catalog_id=uuid.UUID(bytes=catalog_oid).__str__(),
                      result=pformat(result))

        # let's see if we can now fetch the newly created catalog
        try:
            result = await self.call('xbr.network.get_catalog', catalog_oid)
        except ApplicationError as e:
            self.log.error('ApplicationError: {error}', error=e)
            raise e

        assert type(result) == dict
        assert 'oid' in result and type(result['oid']) == bytes and result['oid'] == catalog_oid
        assert 'owner' in result and type(result['owner']) == bytes and result['owner'] == member_adr

    except Exception as e:
        self.log.failure()
        self.config.extra['error'] = e
    finally:
        self.leave()
def _formatted_uuid(uuid_string):
    return str(UUID(uuid_string))
def upload_id(self):
    return UUID(bytes=base64.urlsafe_b64decode(self.request.GET["upload_id"]))
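
# Hedged round-trip sketch (not from the source): a UUID's 16 raw bytes encode
# to a 24-character urlsafe base64 token (22 characters plus '==' padding),
# which is exactly what the property above decodes back into a UUID.
import base64
from uuid import UUID, uuid4

_u = uuid4()
_token = base64.urlsafe_b64encode(_u.bytes).decode('ascii')
assert len(_token) == 24 and _token.endswith('==')
assert UUID(bytes=base64.urlsafe_b64decode(_token)) == _u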
def convert_uuidfield_value(self, value, expression, connection):
    if value is not None:
        value = UUID(value)
    return value
def characteristic_uuid(self) -> UUID:
    return UUID('00000095-0000-1000-8000-0026BB765291')
from raiden.utils import typing
from raiden.utils.formatting import to_checksum_address
from raiden.utils.keys import privatekey_to_address
from raiden.utils.typing import (
    Address,
    Any,
    BlockNumber,
    BlockTimeout,
    ChainID,
    Dict,
    PaymentAmount,
    TokenAmount,
    TokenNetworkAddress,
)

DEFAULT_FEEDBACK_TOKEN = UUID("381e4a005a4d4687ac200fa1acd15c6f")


def assert_checksum_address_in_url(url):
    message = "URL does not contain properly encoded address."
    assert any(is_checksum_address(token) for token in url.split("/")), message


def create_square_network_topology(
    token_network_state, our_address
) -> typing.Tuple[TokenNetworkState, typing.List[typing.Address], typing.List[NettingChannelState]]:
    address1 = factories.make_address()
    address2 = factories.make_address()
    address3 = factories.make_address()
    address4 = factories.make_address()
def is_uuid(string, version=4):
    try:
        # UUID() raises ValueError only for a malformed string; the version
        # argument overrides the version bits rather than checking them
        UUID(string, version=version)
        return True
    except ValueError:
        return False
def uid(value):
    # Parse the input as a UUID; raises ValueError if the string is malformed.
    # Note that version=4 forces the version bits rather than validating them.
    uuid_obj = UUID(value, version=4)
    return str(uuid_obj)
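
# Hedged caveat demo (not from the source): passing version=4 to UUID() sets
# the version/variant bits instead of validating them, so a well-formed
# version-1 string is accepted by both helpers above and rewritten as v4.
from uuid import UUID, uuid1

_v1 = str(uuid1())
assert is_uuid(_v1, version=4)        # passes despite being a v1 string
assert UUID(uid(_v1)).version == 4    # the bits were overwritten, not checked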
def get_id(self) -> UUID:
    return UUID("b3c487ea-e670-4801-bcdc-29639bf1269b")
async def test_execute_for_separate_runtime_container(async_test_client, clean_test_db_engine):
    patched_session = sessionmaker(clean_test_db_engine)
    with mock.patch(
        "hetdesrun.persistence.dbservice.nesting.Session",
        patched_session,
    ):
        with mock.patch(
            "hetdesrun.persistence.dbservice.revision.Session",
            patched_session,
        ):
            with mock.patch(
                "hetdesrun.webservice.config.runtime_config",
                is_runtime_service=False,
            ):
                resp_mock = mock.Mock()
                resp_mock.status_code = 200
                resp_mock.json = mock.Mock(
                    return_value={
                        "output_results_by_output_name": {"wf_output": 100},
                        "output_types_by_output_name": {"wf_output": "INT"},
                        "result": "ok",
                        "job_id": "1270547c-b224-461d-9387-e9d9d465bbe1",
                    }
                )
                with mock.patch(
                    "hetdesrun.backend.execution.httpx.AsyncClient.post",
                    return_value=resp_mock,
                ) as mocked_post:
                    tr_component_1 = TransformationRevision(**tr_json_component_1)
                    tr_component_1.content = update_code(tr_component_1)
                    store_single_transformation_revision(tr_component_1)

                    tr_workflow_2 = TransformationRevision(**tr_json_workflow_2_update)
                    store_single_transformation_revision(tr_workflow_2)

                    update_or_create_nesting(tr_workflow_2)

                    exec_by_id_input = ExecByIdInput(
                        id=tr_workflow_2.id,
                        wiring=tr_workflow_2.test_wiring,
                        job_id=UUID("1270547c-b224-461d-9387-e9d9d465bbe1"),
                    )

                    async with async_test_client as ac:
                        response = await ac.post(
                            "/api/transformations/execute",
                            json=json.loads(exec_by_id_input.json()),
                        )

                    assert response.status_code == 200
                    resp_data = response.json()
                    assert "output_types_by_output_name" in resp_data
                    assert "job_id" in resp_data
                    assert UUID(resp_data["job_id"]) == UUID("1270547c-b224-461d-9387-e9d9d465bbe1")
                    mocked_post.assert_called_once()
def removeImages(self, project, imageList, forceRemove=False, deleteFromDisk=False):
    '''
    Receives an iterable of image IDs and removes them from the project
    database schema, including associated user views, annotations, and
    predictions made.
    Only removes entries if no user views, annotations, or predictions
    exist, unless "forceRemove" is True.
    If "deleteFromDisk" is True, the image files are also deleted from
    the project directory on the file system.

    Returns a list of images that were deleted.
    '''
    imageList = tuple([(UUID(i),) for i in imageList])

    queryArgs = []
    deleteArgs = []
    if forceRemove:
        queryStr = sql.SQL('''
            SELECT id, filename
            FROM {id_img}
            WHERE id IN %s;
        ''').format(id_img=sql.Identifier(project, 'image'))
        queryArgs = tuple([imageList])

        deleteStr = sql.SQL('''
            DELETE FROM {id_iu} WHERE image IN %s;
            DELETE FROM {id_anno} WHERE image IN %s;
            DELETE FROM {id_pred} WHERE image IN %s;
            DELETE FROM {id_img} WHERE id IN %s;
        ''').format(id_iu=sql.Identifier(project, 'image_user'),
                    id_anno=sql.Identifier(project, 'annotation'),
                    id_pred=sql.Identifier(project, 'prediction'),
                    id_img=sql.Identifier(project, 'image'))
        deleteArgs = tuple([imageList] * 4)

    else:
        queryStr = sql.SQL('''
            SELECT id, filename
            FROM {id_img}
            WHERE id IN %s
            AND id NOT IN (
                SELECT image FROM {id_iu} WHERE image IN %s
                UNION ALL
                SELECT image FROM {id_anno} WHERE image IN %s
                UNION ALL
                SELECT image FROM {id_pred} WHERE image IN %s
            );
        ''').format(id_img=sql.Identifier(project, 'image'),
                    id_iu=sql.Identifier(project, 'image_user'),
                    id_anno=sql.Identifier(project, 'annotation'),
                    id_pred=sql.Identifier(project, 'prediction'))
        queryArgs = tuple([imageList] * 4)

        deleteStr = sql.SQL('''
            DELETE FROM {id_img}
            WHERE id IN %s
            AND id NOT IN (
                SELECT image FROM {id_iu} WHERE image IN %s
                UNION ALL
                SELECT image FROM {id_anno} WHERE image IN %s
                UNION ALL
                SELECT image FROM {id_pred} WHERE image IN %s
            );
        ''').format(id_img=sql.Identifier(project, 'image'),
                    id_iu=sql.Identifier(project, 'image_user'),
                    id_anno=sql.Identifier(project, 'annotation'),
                    id_pred=sql.Identifier(project, 'prediction'))
        deleteArgs = tuple([imageList] * 4)

    # retrieve images to be deleted
    imgs_del = self.dbConnector.execute(queryStr, queryArgs, 'all')

    if imgs_del is None:
        imgs_del = []

    if len(imgs_del):
        # delete images
        self.dbConnector.execute(deleteStr, deleteArgs, None)

        if deleteFromDisk:
            projectFolder = os.path.join(
                self.config.getProperty('FileServer', 'staticfiles_dir'), project)
            if os.path.isdir(projectFolder) or os.path.islink(projectFolder):
                for i in imgs_del:
                    filePath = os.path.join(projectFolder, i['filename'])
                    if os.path.isfile(filePath):
                        os.remove(filePath)

    # convert UUID
    for idx in range(len(imgs_del)):
        imgs_del[idx]['id'] = str(imgs_del[idx]['id'])

    return imgs_del
async def test_execute_for_nested_workflow(async_test_client, clean_test_db_engine):
    patched_session = sessionmaker(clean_test_db_engine)
    with mock.patch(
        "hetdesrun.persistence.dbservice.nesting.Session",
        patched_session,
    ):
        with mock.patch(
            "hetdesrun.persistence.dbservice.revision.Session",
            patched_session,
        ):
            async with async_test_client as ac:
                json_files = [
                    "./transformations/components/connectors/pass-through-integer_100_57eea09f-d28e-89af-4e81-2027697a3f0f.json",
                    "./transformations/components/connectors/pass-through-series_100_bfa27afc-dea8-b8aa-4b15-94402f0739b6.json",
                    "./transformations/components/connectors/pass-through-string_100_2b1b474f-ddf5-1f4d-fec4-17ef9122112b.json",
                    "./transformations/components/remaining-useful-life/univariate-linear-rul-regression_100_8d61a267-3a71-51cd-2817-48c320469d6b.json",
                    "./transformations/components/visualization/univariate-linear-rul-regression-result-plot_100_9c3f88ce-1311-241e-18b7-acf7d3f5a051.json",
                    "./transformations/components/arithmetic/consecutive-differences_100_ce801dcb-8ce1-14ad-029d-a14796dcac92.json",
                    "./transformations/components/basic/filter_100_18260aab-bdd6-af5c-cac1-7bafde85188f.json",
                    "./transformations/components/basic/greater-or-equal_100_f759e4c0-1468-0f2e-9740-41302b860193.json",
                    "./transformations/components/basic/last-datetime-index_100_c8e3bc64-b214-6486-31db-92a8888d8991.json",
                    "./transformations/components/basic/restrict-to-time-interval_100_bf469c0a-d17c-ca6f-59ac-9838b2ff67ac.json",
                    "./transformations/components/connectors/pass-through-float_100_2f511674-f766-748d-2de3-ad5e62e10a1a.json",
                    "./transformations/components/visualization/single-timeseries-plot_100_8fba9b51-a0f1-6c6c-a6d4-e224103b819c.json",
                    "./transformations/workflows/examples/data-from-last-positive-step_100_2cbb87e7-ea99-4404-abe1-be550f22763f.json",
                    "./transformations/workflows/examples/univariate-linear-rul-regression-example_100_806df1b9-2fc8-4463-943f-3d258c569663.json",
                    "./transformations/workflows/examples/linear-rul-from-last-positive-step_100_3d504361-e351-4d52-8734-391aa47e8f24.json",
                ]

                for file in json_files:
                    tr_json = load_json(file)
                    response = await ac.put(
                        posix_urljoin(
                            get_config().hd_backend_api_url,
                            "transformations",
                            tr_json["id"],
                        ) + "?allow_overwrite_released=True",
                        json=tr_json,
                    )

                component_id = UUID("57eea09f-d28e-89af-4e81-2027697a3f0f")
                updated_component = read_single_transformation_revision(component_id)
                updated_component.deprecate()

                response = await ac.put(
                    "/api/transformations/" + str(component_id),
                    json=json.loads(updated_component.json(by_alias=True)),
                )

                # linear rul from last positive step
                workflow_id = UUID("3d504361-e351-4d52-8734-391aa47e8f24")
                tr_workflow = read_single_transformation_revision(workflow_id)

                exec_by_id_input = ExecByIdInput(id=workflow_id, wiring=tr_workflow.test_wiring)

                response = await ac.post(
                    "/api/transformations/execute",
                    json=json.loads(exec_by_id_input.json()),
                )

                assert response.status_code == 200
                assert "output_types_by_output_name" in response.json()
                assert response.json()["result"] == "ok"
                assert (
                    abs(response.json()["output_results_by_output_name"]["intercept"] - 2.88) < 0.01
                )
                assert response.json()["output_results_by_output_name"]["before_step_detect"] == {}
                assert response.json()["output_results_by_output_name"]["rul_regression_result_plot"] == {}
def GetApfsContainerUuid(img, container_start_offset):
    '''Returns a UUID object'''
    uuid_bytes = img.read(container_start_offset + 72, 16)
    uuid = UUID(bytes=uuid_bytes)
    return uuid
def get_list(zkhandler, node, state, tag, limit, is_fuzzy=True, negate=False):
    if node:
        # Verify node is valid
        if not common.verifyNode(zkhandler, node):
            return False, 'Specified node "{}" is invalid.'.format(node)

    if state:
        valid_states = [
            "start",
            "restart",
            "shutdown",
            "stop",
            "disable",
            "fail",
            "migrate",
            "unmigrate",
            "provision",
        ]
        if state not in valid_states:
            return False, 'VM state "{}" is not valid.'.format(state)

    full_vm_list = zkhandler.children("base.domain")

    # Set our limit to a sensible regex
    if limit:
        # Check if the limit is a UUID
        is_limit_uuid = False
        try:
            uuid_obj = UUID(limit, version=4)
            limit = str(uuid_obj)
            is_limit_uuid = True
        except ValueError:
            pass

        if is_fuzzy and not is_limit_uuid:
            try:
                # Implicitly assume fuzzy limits
                if not re.match(r"\^.*", limit):
                    limit = ".*" + limit
                if not re.match(r".*\$", limit):
                    limit = limit + ".*"
            except Exception as e:
                return False, "Regex Error: {}".format(e)

    get_vm_info = dict()
    for vm in full_vm_list:
        name = zkhandler.read(("domain", vm))
        is_limit_match = False
        is_tag_match = False
        is_node_match = False
        is_state_match = False

        # Check on limit
        if limit:
            # Try to match the limit against the UUID (if applicable) and name
            try:
                if is_limit_uuid and re.fullmatch(limit, vm):
                    is_limit_match = True
                if re.fullmatch(limit, name):
                    is_limit_match = True
            except Exception as e:
                return False, "Regex Error: {}".format(e)
        else:
            is_limit_match = True

        if tag:
            vm_tags = zkhandler.children(("domain.meta.tags", vm))
            if negate and tag not in vm_tags:
                is_tag_match = True
            if not negate and tag in vm_tags:
                is_tag_match = True
        else:
            is_tag_match = True

        # Check on node
        if node:
            vm_node = zkhandler.read(("domain.node", vm))
            if negate and vm_node != node:
                is_node_match = True
            if not negate and vm_node == node:
                is_node_match = True
        else:
            is_node_match = True

        # Check on state
        if state:
            vm_state = zkhandler.read(("domain.state", vm))
            if negate and vm_state != state:
                is_state_match = True
            if not negate and vm_state == state:
                is_state_match = True
        else:
            is_state_match = True

        get_vm_info[vm] = (
            True
            if is_limit_match and is_tag_match and is_node_match and is_state_match
            else False
        )

    # Obtain our VM data in a thread pool
    # This helps parallelize the numerous Zookeeper calls a bit, within the bounds of the GIL, and
    # should help prevent this task from becoming absurdly slow with very large numbers of VMs.
    # The max_workers value is capped at 32 to avoid creating an absurd number of threads,
    # especially if the list gets called multiple times simultaneously by the API, but it still
    # provides a noticeable speedup.
    vm_execute_list = [vm for vm in full_vm_list if get_vm_info[vm]]
    vm_data_list = list()
    with ThreadPoolExecutor(max_workers=32, thread_name_prefix="vm_list") as executor:
        futures = []
        for vm_uuid in vm_execute_list:
            futures.append(
                executor.submit(common.getInformationFromXML, zkhandler, vm_uuid)
            )
        for future in futures:
            try:
                vm_data_list.append(future.result())
            except Exception:
                pass

    return True, vm_data_list
def uuid_or_none(s):
    return None if s == 'null' or s == '--' else UUID(s)
def project(session: FakeSession) -> Project:
    project = Project(name="Test GEM Tables project", session=session)
    project.uid = UUID('6b608f78-e341-422c-8076-35adc8828545')
    return project