Example 1
 def _dump_sign(self, data: typing.Dict):
     l = list(data.keys())
     l.sort()
     s = ''
     for i in l:
         s += i+'='+str(data[i])+'&'
     s = s[:-1]
     return hashlib.md5(s.encode('utf-8')).hexdigest()
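Example 1's pattern (sort the keys, join key=value pairs with '&', hash the result) is a common API-signing scheme. A standalone sketch of the same idea, with an illustrative name and input:

import hashlib
import typing

def dump_sign(data: typing.Dict) -> str:
    # join sorted key=value pairs with '&', then MD5 the result
    payload = '&'.join('{}={}'.format(k, data[k]) for k in sorted(data))
    return hashlib.md5(payload.encode('utf-8')).hexdigest()

print(dump_sign({'b': 2, 'a': 1}))  # MD5 of "a=1&b=2"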
Example 2
    def storeProductIndex(self, _product_dict: typing.Dict):
        for product, v in _product_dict.items():
            # for each product, compute index of product
            tmp_data_list = []
            for instrument in v['InstrumentList']:
                self.instrument_day_data_cur.execute(
                    "SELECT openprice, highprice, lowprice, closeprice, "
                    "volume, openinterest FROM {} WHERE tradingday='{}'".format(
                        instrument, v['TradingDay']
                    )
                )
                values = self.instrument_day_data_cur.fetchone()
                tmp_data_list.append(dict(zip(
                    INDEX_TABLE_KEYS[1:], values
                )))

            openprice_arr = self._get_arr(tmp_data_list, 'OpenPrice')
            highprice_arr = self._get_arr(tmp_data_list, 'HighPrice')
            lowprice_arr = self._get_arr(tmp_data_list, 'LowPrice')
            closeprice_arr = self._get_arr(tmp_data_list, 'ClosePrice')
            volume_arr = self._get_arr(tmp_data_list, 'Volume')
            openinterest_arr = self._get_arr(tmp_data_list, 'OpenInterest')

            total_openinterest = openinterest_arr.sum()
            if total_openinterest == 0:
                index_dict = {
                    'TradingDay': v['TradingDay'],
                    'OpenPrice': openprice_arr.mean(),
                    'HighPrice': highprice_arr.mean(),
                    'LowPrice': lowprice_arr.mean(),
                    'ClosePrice': closeprice_arr.mean(),
                    'Volume': int(volume_arr.sum()),
                    'OpenInterest': total_openinterest,
                }
            else:
                tmp_rate = openinterest_arr / float(total_openinterest)
                index_dict = {
                    'TradingDay': v['TradingDay'],
                    'OpenPrice': np.sum(tmp_rate * openprice_arr),
                    'HighPrice': np.sum(tmp_rate * highprice_arr),
                    'LowPrice': np.sum(tmp_rate * lowprice_arr),
                    'ClosePrice': np.sum(tmp_rate * closeprice_arr),
                    'Volume': int(volume_arr.sum()),
                    'OpenInterest': total_openinterest
                }
            # the actual store step
            try:
                self._store_product_index(product, index_dict)
            except psycopg2.DatabaseError as e:
                self.product_index_con.rollback()
                if e.pgcode == '42P01':
                    logging.warning(e)
                    self._create_product_index_table(product)
                    self._store_product_index(product, index_dict)
                else:
                    logging.error(e)
                    sys.exit(1)
Example 3
 def _check_for_invalid_ids(cls, multi_ids_dict: typing.Dict, entity_type: str):
     check_result = CheckResult(check_name=CHECK_NAMES.check_valid_ids, error_message=[])
     if not multi_ids_dict:
         check_result.result = RESULT.FAILURE
         check_result.error_message.append("No ids found.")
     for k, values in multi_ids_dict.items():
         wrong_ids = [id for id in values if not cls._is_id_valid(id)]
         if wrong_ids:
             check_result.error_message.append("Invalid " + str(k) + "(s) for " + str(entity_type) + ": " + str(wrong_ids))
             check_result.result = RESULT.FAILURE
     return check_result
Example 4
    def storeDominantIndex(self, _product_dict: typing.Dict):
        for product, v in _product_dict.items():
            # for each product, compute index of dominant
            dominant = v['Dominant']
            self.instrument_day_data_cur.execute(  # get the dominant data
                "SELECT openprice, highprice, lowprice, closeprice, "
                "volume, openinterest FROM {} WHERE tradingday='{}'".format(
                    dominant, v['TradingDay']
                )
            )
            values = self.instrument_day_data_cur.fetchone()
            cur_data = dict(zip(INDEX_TABLE_KEYS[1:], values))
            self.instrument_day_data_cur.execute(  # get the previous day's close price
                "SELECT closeprice FROM {} WHERE tradingday<'{}' "
                "ORDER BY tradingday DESC LIMIT 1".format(
                    dominant, v['TradingDay']
                )
            )
            values = self.instrument_day_data_cur.fetchone()
            if values is not None:
                return_rate = cur_data['ClosePrice'] / values[0]
            else:
                return_rate = 1.0

            last_index_price = self._get_last_dominant_index(
                product, v['TradingDay']
            )
            if last_index_price is None:
                last_index_price = 1000.0
            new_index_price = last_index_price * return_rate
            price_scale = new_index_price / cur_data['ClosePrice']
            new_openprice = cur_data['OpenPrice'] * price_scale
            new_highprice = cur_data['HighPrice'] * price_scale
            new_lowprice = cur_data['LowPrice'] * price_scale

            self.dominant_index_cur.execute(
                "INSERT INTO {} VALUES "
                "(%s, %s, %s, %s, %s, %s, %s) "
                "ON CONFLICT (TradingDay) DO UPDATE SET "
                "OpenPrice = EXCLUDED.OpenPrice,"
                "HighPrice = EXCLUDED.HighPrice,"
                "LowPrice = EXCLUDED.LowPrice,"
                "ClosePrice = EXCLUDED.ClosePrice,"
                "Volume = EXCLUDED.Volume,"
                "OpenInterest = EXCLUDED.OpenInterest"
                "".format(product),
                [
                    v['TradingDay'],
                    new_openprice, new_highprice,
                    new_lowprice, new_index_price,
                    cur_data['Volume'], cur_data['OpenInterest']
                ]
            )
            self.dominant_index_con.commit()
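The index is chained: each day's index value is the previous index value times the dominant contract's close-to-close return, and the day's OHLC is rescaled to the index level. A short numeric sketch of that step (prices are illustrative):

last_index_price = 1000.0                   # falls back to 1000.0 when there is no history
prev_close, cur_close = 3500.0, 3570.0      # previous and current close of the dominant
return_rate = cur_close / prev_close        # 1.02
new_index_price = last_index_price * return_rate    # 1020.0
price_scale = new_index_price / cur_close   # factor that rescales today's OHLC
print(new_index_price, cur_close * price_scale)     # 1020.0 1020.0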
Example 5
 def storeInstrumentDayData(self, _data_dict: typing.Dict):
     for instrument, v in _data_dict.items():
         try:
             self._store_instrument_day_data(instrument, v)
         except psycopg2.DatabaseError as e:
             self.instrument_day_data_con.rollback()
             if e.pgcode == '42P01':
                 logging.warning(e)
                 self._create_instrument_day_data_table(instrument)
                 self._store_instrument_day_data(instrument, v)
             else:
                 logging.error(e)
                 sys.exit(1)
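Examples 2 and 5 share the same recovery pattern: attempt the insert, and on PostgreSQL error code 42P01 (undefined_table) create the missing table and retry once. A minimal generic sketch, assuming a psycopg2 connection and hypothetical insert_row/create_table callables:

import logging
import sys
import psycopg2

def store_with_retry(con, insert_row, create_table):
    # insert_row and create_table are hypothetical zero-argument callables
    try:
        insert_row()
        con.commit()
    except psycopg2.DatabaseError as e:
        con.rollback()
        if e.pgcode == '42P01':  # undefined_table: create it and retry once
            logging.warning(e)
            create_table()
            insert_row()
            con.commit()
        else:
            logging.error(e)
            sys.exit(1)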
Example 6
def init_validator(hints: typing.Dict, parent: typing.Optional[Validator]=None) -> Validator:
    """
    Returns a new validator instance from a given dictionary of type hints
    """
    validator = Validator(parent)

    for name, hint in hints.items():
        if hint is None:
            hint = type(None)

        root_parser = get_parser(None, hint, validator)
        syntax_tree = visit(root_parser)

        validator.roots[name] = syntax_tree

    return validator
Example 7
 def __init__(self, config: typing.Dict):
     """Initialize a text input."""
     self._config = config
     self.editable = True
     self._current_value = config.get(CONF_INITIAL)
Example 8
def lat_lon_to_solr(coreMetadata: typing.Dict, latitude: typing.SupportsFloat,
                    longitude: typing.SupportsFloat):
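    # shapely points take (x, y), i.e. (longitude, latitude); the argument order matters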
    coreMetadata.update(
        shapely_to_solr(shapely.geometry.Point(longitude, latitude)))
Example 9
 def __init__(self):
     self.dealias: Dict[str, ID] = {}
     self.aliases: Dict[ID, List[str]] = {}
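Note that typing.Dict is parameterized with square brackets; calling it, as the original line above did with Dict(ID, List[str]), raises a TypeError at runtime. A minimal sketch:

from typing import Dict, List

ID = int  # illustrative alias; the real ID type is not shown in this snippet

aliases: Dict[ID, List[str]] = {}    # correct: subscription with []
# aliases = Dict(ID, List[str])      # TypeError: typing.Dict cannot be instantiated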
Example 10
def map_processing_unit(document: typing.Dict) -> ProcessingUnit:
    id = document.get('id')
    clock_rate = document['clock-rate']
    cores = document['cores']
    return ProcessingUnit(id=id, clock_rate=clock_rate, cores=cores)
Example 11
def map_rack(document: typing.Dict) -> Rack:
    id = document.get('id')
    capacity = document.get('capacity')
    units = list(map(map_rack_unit, document['units']))
    return Rack(capacity=capacity, units=units, id=id)
Example 12
File: map.py Project: tek/tryp.py
 def values_view(self):
     return Dict.values(self)
Example 13
    def transform(self, x: t.Dict) -> xr.Dataset:
        # Basic preparation
        prefix = self.prefix

        sensor_dims = []
        sensor_dim_metadata = {}

        for y in self.sensor_dims:
            if isinstance(y, str):
                sensor_dims.append(y)
                sensor_dim_metadata[y] = {}
            else:
                sensor_dims.append(y[0])
                sensor_dim_metadata[y[0]] = y[1]

        spectral_dims = []
        spectral_dim_metadata = {}

        for y in _spectral_dims():
            if isinstance(y, str):
                spectral_dims.append(y)
                spectral_dim_metadata[y] = {}
            else:
                spectral_dims.append(y[0])
                spectral_dim_metadata[y[0]] = y[1]

        sensor_datasets = []

        regex = re.compile(
            r"\_".join(
                [prefix]
                + [rf"{sensor_dim}(?P<{sensor_dim}>\d*)" for sensor_dim in sensor_dims]
            )
        )

        # Loop on spectral indexes
        for spectral_index in x.keys():
            # Loop on sensors
            for sensor_id, sensor_data in x[spectral_index]["values"].items():
                # Collect data
                ds = sensor_data.copy(deep=False)
                spp = x[spectral_index]["spp"][sensor_id]

                # Set spectral coordinates
                spectral_coords = {
                    spectral_dim: [spectral_coord]
                    for spectral_dim, spectral_coord in zip(
                        spectral_dims, always_iterable(spectral_index)
                    )
                }

                # Detect sensor coordinates
                match = regex.match(sensor_id)

                if match is None:
                    raise RuntimeError(
                        "could not detect requested sensor dimensions in "
                        f"sensor ID '{sensor_id}' using regex '{regex.pattern}'; "
                        "this could be due to incorrect values or order of "
                        "'sensor_dims'"
                    )

                sensor_coords = {
                    f"{sensor_dim}_index": [int(match.group(sensor_dim))]
                    for sensor_dim in sensor_dims
                }

                # Add spp dimension even though sample count split did not
                # produce any extra sensor
                if "spp" not in sensor_dims:
                    sensor_coords["spp_index"] = [0]

                # Add spectral and sensor dimensions to img array
                all_coords = {**spectral_coords, **sensor_coords}
                ds["img"] = ds.img.expand_dims(dim=all_coords)

                # Package spp in a data array
                all_dims = list(all_coords.keys())
                ds["spp"] = (all_dims, np.reshape(spp, [1 for _ in all_dims]))

                sensor_datasets.append(ds)

        # Combine all the data
        with xr.set_options(keep_attrs=True):
            result = xr.merge(sensor_datasets)

        # Drop "channel" dimension when using a mono variant
        if eradiate.mode().has_flags(ModeFlags.MTS_MONO):
            result = result.squeeze("channel", drop=True)

        # Apply metadata to new dimensions
        for sensor_dim in sensor_dims:
            result[f"{sensor_dim}_index"].attrs = sensor_dim_metadata[sensor_dim]

        if "spp" not in sensor_dims:
            result["spp_index"].attrs = {
                "standard_name": "spp_index",
                "long_name": "SPP index",
            }

        for spectral_dim in spectral_dims:
            result[spectral_dim].attrs = spectral_dim_metadata[spectral_dim]

        # Apply metadata to data variables
        if isinstance(self.var, str):
            var = self.var
            var_metadata = {}
        else:
            var = self.var[0]
            var_metadata = self.var[1]

        result = result.rename({"img": var})
        result[var].attrs.update(var_metadata)

        result["spp"].attrs = {
            "standard_name": "sample_count",
            "long_name": "sample count",
        }

        return result
Example 14
    def resampleProbabilisticValues(self, Dict=None):
        '''
            Normal Distribution Sampling:
                If (key + "_stdDev") exists and the value being returned is a scalar or Vector value, returns a scalar or vector sampled from a normal distribution
                    Where the mean of the normal distribution is taken to be the (original) value of 'key' (moved to 'key_mean' when this function first runs) and the standard deviation of the distribution is the value of 'key_stdDev'
                    For a vector value, a vector of standard deviations is expected
                For repeatable sampling, set the value "MonteCarlo.randomSeed" in the file loaded by this class
        '''
        if Dict is None:
            Dict = self.dict

        if not self.disableDistributionSampling:
            keys = list(
                Dict.keys()
            )  # Get a list of keys at the beginning to avoid issues from the number of keys changing during iterations

            for key in keys:
                ### Sample any probabilistic values from normal distribution ###
                stdDevKey = key + "_stdDev"

                if stdDevKey in Dict:
                    logLine = None
                    meanKey = key + "_mean"

                    try:
                        meanString = Dict[meanKey]
                    except KeyError:
                        # Take the value of the variable as the mean if a _mean value is not provided
                        meanString = Dict[key]
                        Dict[meanKey] = meanString

                    # Try parsing scalar values
                    try:
                        mu = float(meanString)
                        sigma = float(Dict[stdDevKey])

                        sampledValue = self.rng.gauss(mu, sigma)
                        Dict[key] = str(sampledValue)

                        logLine = "Sampling scalar parameter: {}, value: {:1.3f}".format(
                            key, sampledValue)

                    except ValueError:
                        # Try parsing vector value
                        try:
                            muVec = Vector(meanString)
                            sigmaVec = Vector(Dict[stdDevKey])

                            sampledVec = Vector(*[
                                self.rng.gauss(mu, sigma)
                                for mu, sigma in zip(muVec, sigmaVec)
                            ])
                            Dict[key] = str(sampledVec)

                            logLine = "Sampling vector parameter: {}, value: ({:1.3f})".format(
                                key, sampledVec)

                        except ValueError:
                            # ValueError throws if either conversion to Vector fails
                            # Note that monte carlo / probabilistic variables can only be scalars or vectors
                            print(
                                "ERROR: Unable to parse probabilistic value: {} for key {} (or {} for key {}). Note that probabilistic values must be either scalars or vectors of length 3."
                                .format(meanString, meanKey,
                                        self.getValue(stdDevKey), stdDevKey))
                            raise

                    ### Logging ###
                    if logLine is not None:
                        if self.monteCarloLogger is not None:
                            self.monteCarloLogger.log(logLine)
                        elif not self.silent:
                            print(logLine)
Example 15
    def persistent(cls,
                   computing_table: CTableABC,
                   namespace,
                   name,
                   schema=None,
                   part_of_data=None,
                   engine=None,
                   engine_address=None,
                   store_type=None,
                   token: typing.Dict = None) -> StorageTableMeta:
        if engine:
            if engine != StorageEngine.PATH and engine not in Relationship.Computing.get(
                    computing_table.engine, {}).get(EngineType.STORAGE,
                                                    {}).get("support", []):
                raise Exception(
                    f"storage engine {engine} not supported with computing engine {computing_table.engine}"
                )
        else:
            engine = Relationship.Computing.get(computing_table.engine,
                                                {}).get(
                                                    EngineType.STORAGE,
                                                    {}).get("default", None)
            if not engine:
                raise Exception(
                    f"can not found {computing_table.engine} default storage engine"
                )
        if engine_address is None:
            # find engine address from service_conf.yaml
            engine_address = engine_utils.get_engines_config_from_conf().get(
                EngineType.STORAGE, {}).get(engine, {})
        address_dict = engine_address.copy()
        partitions = computing_table.partitions

        if engine == StorageEngine.STANDALONE:
            address_dict.update({"name": name, "namespace": namespace})
            store_type = StandaloneStoreType.ROLLPAIR_LMDB if store_type is None else store_type

        elif engine == StorageEngine.EGGROLL:
            address_dict.update({"name": name, "namespace": namespace})
            store_type = EggRollStoreType.ROLLPAIR_LMDB if store_type is None else store_type

        elif engine == StorageEngine.HIVE:
            address_dict.update({"database": namespace, "name": f"{name}"})
            store_type = HiveStoreType.DEFAULT if store_type is None else store_type

        elif engine == StorageEngine.LINKIS_HIVE:
            address_dict.update({
                "database": None,
                "name": f"{namespace}_{name}",
                "username": token.get("username", "")
            })
            store_type = LinkisHiveStoreType.DEFAULT if store_type is None else store_type

        elif engine == StorageEngine.HDFS:
            if not address_dict.get("path"):
                address_dict.update({
                    "path":
                    default_output_fs_path(
                        name=name,
                        namespace=namespace,
                        prefix=address_dict.get("path_prefix"))
                })
            store_type = HDFSStoreType.DISK if store_type is None else store_type

        elif engine == StorageEngine.LOCALFS:
            if not address_dict.get("path"):
                address_dict.update({
                    "path":
                    default_output_fs_path(
                        name=name,
                        namespace=namespace,
                        storage_engine=StorageEngine.LOCALFS)
                })
            store_type = LocalFSStoreType.DISK if store_type is None else store_type

        elif engine == StorageEngine.PATH:
            store_type = PathStoreType.PICTURE if store_type is None else store_type

        else:
            raise RuntimeError(f"{engine} storage is not supported")
        address = StorageTableMeta.create_address(storage_engine=engine,
                                                  address_dict=address_dict)
        schema = schema if schema else {}
        computing_table.save(address,
                             schema=schema,
                             partitions=partitions,
                             store_type=store_type)
        table_count = computing_table.count()
        table_meta = StorageTableMeta(name=name, namespace=namespace, new=True)
        table_meta.address = address
        table_meta.partitions = computing_table.partitions
        table_meta.engine = engine
        table_meta.store_type = store_type
        table_meta.schema = schema
        table_meta.part_of_data = part_of_data if part_of_data else {}
        table_meta.count = table_count
        table_meta.write_access_time = current_timestamp()
        table_meta.origin = StorageTableOrigin.OUTPUT
        table_meta.create()
        return table_meta
Example 16
File: map.py Project: tek/tryp.py
 def get(self, key):
     return Dict.get(self, key)
Example 17
File: map.py Project: tek/tryp.py
 def v(self):
     return List(*Dict.values(self))
Example 18
File: map.py Project: tek/tryp.py
 def k(self):
     return List(*Dict.keys(self))
Example 19
 def entities(self) -> Dict[str, Entity]:
   """Returns a mapping of entity names to Entity instances."""
   return self._entities
Example 20
 def __init__(self):
     self.bones: Dict[str, 'Animation.Bone'] = {}
Example 21
    def get_cs_dimensions(api_config: typing.Dict) -> ConfigurationSpace:
        """
        Help routine to setup ConfigurationSpace search space in constructor.
        Take api_config as argument so this can be static.
        Parameters
        ----------
        api_config: Dict
            api dictionary to construct
        Returns
        -------
        cs: ConfigurationSpace
            ConfigurationSpace that contains the same hyperparameters as api_config
        """
        # TODO 2 options to transform the real and int hyperparameters in different scales
        #  option 1: similar to example_submission.skopt.optimizer, merge 'logit' into 'log' and 'bilog' into 'linear'
        #  option 2: use the api bayesmark.space.space to warp and unwarp the samples
        cs = ConfigurationSpace()
        param_list = sorted(api_config.keys())

        hp_list = []
        for param_name in param_list:
            param_config = api_config[param_name]

            param_type = param_config["type"]
            param_space = param_config.get("space", None)
            param_values = param_config.get("values", None)
            param_range = param_config.get("range", None)

            if param_type == "cat":
                assert param_space is None
                assert param_range is None
                hp = CategoricalHyperparameter(name=param_name,
                                               choices=param_values)
            elif param_type == "bool":
                assert param_space is None
                assert param_values is None
                assert param_range is None
                hp = CategoricalHyperparameter(name=param_name,
                                               choices=[True, False])
            elif param_type == "ordinal":
                # appears in example_submission.skopt.optimizer but not in the README
                assert param_space is None
                assert param_range is None
                hp = OrdinalHyperparameter(name=param_name,
                                           sequence=param_values)
            elif param_type in ("int", "real"):
                if param_values is not None:
                    # TODO: decide whether we treat these parameters as discrete values
                    #  or step function (example see example_submission.skopt.optimizer, line 71-77)
                    # sort the values to store them in OrdinalHyperparameter
                    param_values_sorted = np.sort(param_values)
                    hp = OrdinalHyperparameter(name=param_name,
                                               sequence=param_values_sorted)
                else:
                    log = param_space == "log"

                    if param_type == "int":
                        hp = UniformIntegerHyperparameter(
                            name=param_name,
                            lower=param_range[0],
                            upper=param_range[-1],
                            log=log)
                    else:
                        hp = UniformFloatHyperparameter(name=param_name,
                                                        lower=param_range[0],
                                                        upper=param_range[-1],
                                                        log=log)
            else:
                assert False, "type %s not handled in API" % param_type
            hp_list.append(hp)
        cs.add_hyperparameters(hp_list)
        return cs
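A usage sketch for the routine above, with an illustrative api_config in the bayesmark style (parameter names and ranges are made up):

api_config = {
    "max_depth": {"type": "int", "space": "linear", "range": (1, 15)},
    "learning_rate": {"type": "real", "space": "log", "range": (1e-4, 1e-1)},
    "criterion": {"type": "cat", "values": ["gini", "entropy"]},
    "bootstrap": {"type": "bool"},
}
cs = get_cs_dimensions(api_config)  # ConfigurationSpace with four hyperparameters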
Example 22
 def __init__(self, name: str, device_type: SystemType):
     self.name = name
     self.type = device_type
     self.bandwidth = 0
     self.ping = 0
     self.benchmarks: Dict[str, Scenario_result] = {}  # typing.Dict is subscripted with [], not called; str key type assumed
Example 23
def map_machine(document: typing.Dict) -> PhysicalMachine:
    id = document.get('id')
    state = MachineState[document.get('state', 'RUNNING')]
    cpus = list(map(map_processing_unit, document.get('cpus', [])))
    gpus = list(map(map_processing_unit, document.get('gpus', [])))
    return PhysicalMachine(id=id, state=state, cpus=cpus, gpus=gpus)
Example 24
    def __new__(mcs, name: typing.AnyStr, bases: typing.Tuple,
                class_dict: typing.Dict):
        class_dict_copy = class_dict.copy()
        _id = Field(
            verbose_name='Instance ID',
            data_type=str,
            required=True,
            default=None,
        )
        _ts = Field(
            verbose_name='Timestamp',
            data_type=int,
            required=True,
            default=lambda: time.time(),
        )
        class_dict_copy.update({
            '_id': _id,
            '_ts': _ts,
        })

        # init all field instances
        for k, v in class_dict_copy.items():
            if isinstance(v, Field):
                v.name = k
                v.fullname = f'{name.lower()}.{k}'
                v.internal_name = f'field__{k}'

        meta_data: typing.Dict = {}
        meta_class = class_dict_copy.get('Meta')
        if meta_class:
            assert isinstance(meta_class, type)
            meta_class_dict = meta_class.__dict__
            for mk, mv in meta_class_dict.items():
                if not mk.startswith('__'):
                    meta_data.update({mk: mv})
        class_dict_copy.update({'meta_data': meta_data})

        def generate_id(self):
            return 'test'

        class_dict_copy.update({'generate_id': generate_id})

        def initialize_instance(self, init_data: typing.Dict):
            cls_dict: typing.Dict = self.__class__.__dict__
            for sk, sv in cls_dict.items():
                if isinstance(sv, Field):
                    if sk in init_data:
                        sk_v = init_data[sk]

                        data_type: typing.Any = sv.data_type
                        multi: bool = sv.multi

                        if multi:
                            assert isinstance(sk_v, list)
                            if isinstance(data_type, BlueprintMeta):
                                v_deserialized = []
                                for d in sk_v:
                                    # init every blueprint instance
                                    deserialized_instance = data_type(**d)
                                    # who is the parent of this blueprint ?
                                    deserialized_instance.parent = self
                                    v_deserialized.append(
                                        deserialized_instance)
                            else:
                                # Note: create new instance of list
                                v_deserialized = [item for item in sk_v]
                        else:
                            assert not isinstance(sk_v, list)
                            if isinstance(data_type, BlueprintMeta):
                                v_deserialized = data_type(**sk_v)
                                v_deserialized.parent = self
                            else:
                                v_deserialized = sk_v
                        # set attr value (through descriptor)
                        setattr(self, sk, v_deserialized)
                    else:
                        # the user did not provide a value; use the default value
                        # to initialize the field if possible.
                        # The default value for a multi field should be [].
                        if sv.multi and sv.default is None:
                            sv.default = []
                        # if data type of field is Blueprint, create a new blueprint instance as default value
                        if sv.data_type and isinstance(sv.data_type,
                                                       BlueprintMeta):
                            if sv.default and isinstance(
                                    sv.default, Blueprint):
                                sv.default = copy.copy(sv.default)

                        # will get default value if any,
                        # and set instance field value to default (force check and clean, create new instance if needed)
                        sk_v = getattr(self, sk)

                        # after initialize, value of every field should be in valid state (pass descriptor's check)
                        # check_and_clean_if_possible
                        setattr(self, sk, sk_v)

            # generate id if needed
            if self.is_new:
                assert getattr(self, self.ID_NAME) is None
                id_template = self.meta_data.get('id_template')
                if id_template is None:
                    raise BlueprintTypeException(
                        f'cannot generate id for a newly created blueprint '
                        f'because id_template is not specified in Meta')
                context_render: typing.Dict = {
                    key: getattr(self, key)
                    for key in cls_dict if isinstance(cls_dict[key], Field)
                }
                new_id = id_template.format(**context_render)
                setattr(self, self.ID_NAME, new_id)

            # check required value
            for sk, sv in cls_dict.items():
                if isinstance(sv, Field):
                    sk_v = getattr(self, sk, None)
                    if sk_v is None and sv.required and sv.default is None:
                        raise BlueprintTypeException(
                            f'{sv.fullname} is required '
                            f'but no value provided and no default value set')

        def init(self, **kwargs):
            self.kwargs = kwargs  # used to copy a new blueprint instance
            self.parent = None
            self.id_context = {}
            self.is_new = False

            if self.ID_NAME not in kwargs:
                self.is_new = True
            self.initialize_instance(kwargs)

        def serialize(self, selected_fields=None):
            if selected_fields is None:
                selected_fields = []
            else:
                selected_fields = list(selected_fields)
            cls_dict: typing.Dict = self.__class__.__dict__
            serialized: typing.Dict = {}

            if not self.should_serialize():
                return serialized
            else:
                for sk, sv in cls_dict.items():
                    if isinstance(sv, Field):
                        if selected_fields and sv.name not in selected_fields:
                            continue
                        sk_v = getattr(self, sk)

                        # serialize each field according to sv.data_type and sv.multi
                        data_type: typing.Any = sv.data_type
                        multi: bool = sv.multi
                        if multi:
                            # should serialize each item in the value
                            assert isinstance(sk_v, list)
                            if isinstance(data_type, BlueprintMeta):
                                serialized.update(
                                    {sk: [item.serialize() for item in sk_v]})
                            else:
                                # just create a new list with the same content
                                serialized.update(
                                    {sk: [item for item in sk_v]})
                        else:
                            assert not isinstance(sk_v, list)
                            if isinstance(data_type, BlueprintMeta):
                                if sk_v.should_serialize():
                                    serialized.update({sk: sk_v.serialize()})
                            else:
                                serialized.update({sk: sk_v})
            return serialized

        def should_serialize(self):
            return True

        class_dict_copy.update({
            'ID_NAME': '_id',
            'TS_NAME': '_ts',
            '__init__': init,
            'initialize_instance': initialize_instance,
            'serialize': serialize,
            'should_serialize': should_serialize,
        })
        cls = type.__new__(mcs, name, bases, class_dict_copy)
        return cls
Example 25
 def from_dict(cls, raw: typing.Dict):
     try:
         report = cls(raw["algorithm_id"], raw.get("extra_parameters"))
     except KeyError:
         raise
     return report
Example 26
def error_checker(data: typing.Dict) -> None:
    if data.get("user_id") or data.get("channel_id"):
        raise InvalidSnowflake(
            data.get("user_id")[0] if data.get("user_id") is not None
            else data.get("channel_id")[0])
    elif data.get("code"):
        if data.get("code") == 50001:
            raise MissingAccess(data.get("message"))
        else:
            raise InvalidSnowflake(data.get("message"))
    elif data.get("avatar"):
        if isinstance(data.get("avatar"), list):
            raise AvatarError(data.get("avatar")[0])
Example 27
def map_storage(
    fgraph: FunctionGraph,
    order: typing.Iterable[Apply],
    input_storage: typing.Optional[typing.List],
    output_storage: typing.Optional[typing.List],
    storage_map: typing.Dict = None,
) -> typing.Tuple[typing.List, typing.List, typing.Dict]:
    """Ensure there is storage (a length-1 list) for inputs, outputs, and interior nodes.

    Parameters
    ----------
    fgraph
        The current fgraph. This function uses the inputs and outputs
        attributes.
    order
        An iterable over Apply instances (in program running order).
    input_storage
        None or existing input storage (see below).
    output_storage
        None or existing output storage (see below).

    Returns
    -------
    3-tuple
        List of storage for inputs, list of storage for outputs, and
        the `storage_map`.

    Extended summary
    ----------------
    This function iterates over the nodes in `order` and ensures that for every
    input and output `Variable`, there is a unique storage container. This is
    returned as a dictionary Variable -> storage called the `storage_map`.

    This function also returns `input_storage`, which is a list of storages
    corresponding to fgraph.inputs.
    This function also returns `output_storage`, which is a list of storages
    corresponding to fgraph.outputs.

    """
    # each Apply argument's data is stored in a list of length 1 (these lists act like pointers)

    if storage_map is None:
        storage_map = {}

    # input_storage is a list of data-containers for the inputs.
    if input_storage is None:
        input_storage = [[None] for input in fgraph.inputs]
    else:
        assert len(fgraph.inputs) == len(input_storage)

    # add input storage into storage_map
    for r, storage in zip(fgraph.inputs, input_storage):
        if r in storage_map:
            assert storage_map[r] is storage, (
                "Given input_storage conflicts with storage in given "
                "storage_map. Given input_storage: ",
                storage,
                "Storage in storage_map: ",
                storage_map[r],
            )
        else:
            storage_map[r] = storage
    #     for orphan in fgraph.orphans:
    #         if not isinstance(orphan, Constant):
    #             raise TypeError("Cannot link a graph with non-constant orphans.", orphan)
    #         storage_map[orphan] = [orphan.data]

    # allocate output storage
    if output_storage is not None:
        assert len(fgraph.outputs) == len(output_storage)
        for r, storage in zip(fgraph.outputs, output_storage):
            if r in storage_map:
                assert storage_map[r] is storage, (
                    "Given output_storage conflicts with storage in given "
                    "storage_map. Given output_storage: ",
                    storage,
                    "Storage in storage_map: ",
                    storage_map[r],
                )
            else:
                storage_map[r] = storage

    # allocate storage for intermediate computation
    for node in order:
        for r in node.inputs:
            if r not in storage_map:
                assert isinstance(r, Constant)
                storage_map[r] = [r.data]
        for r in node.outputs:
            storage_map.setdefault(r, [None])
    for r in fgraph.outputs:
        if isinstance(r, Constant):
            storage_map.setdefault(r, [r.data])

    # extract output storage
    if output_storage is None:
        output_storage = [storage_map[r] for r in fgraph.outputs]

    return input_storage, output_storage, storage_map
Example 28
def write_templated_file_to_path(path: Path,
                                 lookup_table: typing.Dict) -> None:
    logger.debug(
        f"Writing templated file {get_templates_path(path.name)} to {path}")
    path.write_text(get_template_file(lookup_table.get(path.name)))
Example 29
def log_from_changes(changes: ty.Dict) -> str:
    halfway = {title: '\n'.join(f'  - {item}' for item in content)
               for title, content in changes.items()}
    res = '\n\n'.join(f'### {title}\n{content}' for title, content in halfway.items())
    return res
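A usage sketch (the change entries are illustrative):

changes = {"Added": ["typing examples"], "Fixed": ["Dict call syntax"]}
print(log_from_changes(changes))
# ### Added
#   - typing examples
#
# ### Fixed
#   - Dict call syntax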
Example 30
def null_to_string(d: typing.Dict) -> None:
    for k in d.keys():
        if isinstance(d[k], dict):
            null_to_string(d[k])
        elif d[k] is None:
            d[k] = ""
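The replacement happens in place and recurses into nested dicts:

d = {"a": None, "b": {"c": None, "d": 1}}
null_to_string(d)
print(d)  # {'a': '', 'b': {'c': '', 'd': 1}}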
Example 31
 def get_distribution_info(
     self,
     csw_distribution: etree.Element,
     api_record: typing.Dict,
     harvestable_resource: models.HarvestableResource,
 ) -> resourcedescriptor.RecordDistribution:
     online_elements = csw_distribution.xpath(
         ".//gmd:transferOptions//gmd:onLine",
         namespaces=csw_distribution.nsmap)
     link = None
     wms = None
     wfs = None
     wcs = None
     legend = None
     geojson = None
     original = None
     original_format_values = (
         "original dataset format",
         "hosted document format",
     )
     for online_el in online_elements:
         protocol = get_xpath_value(online_el, ".//gmd:protocol").lower()
         linkage = get_xpath_value(online_el, ".//gmd:linkage")
         description = (get_xpath_value(online_el, ".//gmd:description")
                        or "").lower()
         if "link" in protocol:
             link = linkage
         elif "ogc:wms" in protocol:
             wms = linkage
         elif "ogc:wfs" in protocol:
             wfs = linkage
         elif "ogc:wcs" in protocol:
             wcs = linkage
         elif "legend" in description.lower():
             legend = linkage
         elif "geojson" in description.lower():
             geojson = linkage
         elif api_record.get("doc_file") is not None:
             # NOTE: for resources of type document, the GeoNode API returns a
             # relative URL which can be used directly, as opposed to its CSW API,
             # which returns a generic download URL
             document_url: str = api_record.get("doc_file")
             if document_url.startswith("/"):
                 original = f"{self.remote_url}{document_url}"
             else:
                 original = document_url
         else:
             for original_value in original_format_values:
                 if original_value in description.lower():
                     original = linkage
                     break
     return resourcedescriptor.RecordDistribution(
         link_url=link,
         wms_url=wms,
         wfs_url=wfs,
         wcs_url=wcs,
         thumbnail_url=self._retrieve_thumbnail_url(api_record,
                                                    harvestable_resource),
         legend_url=legend,
         geojson_url=geojson,
         original_format_url=original,
     )
Example 32
def parse_query(query_str: str, res: typing.Dict = None) -> typing.Dict:
    """
    Basically takes a query string (like the ones in the examples of commands for the search__offers function) and
    processes it into a dict of URL parameters to be sent to the server.

    :param str query_str:
    :param Dict res:
    :return Dict:
    """
    if res is None:
        res = {}
    if isinstance(query_str, list):
        query_str = " ".join(query_str)
    query_str = query_str.strip()
    opts = re.findall(
        r"([a-zA-Z0-9_]+)( *[=><!]+| +(?:[lg]te?|nin|neq|eq|not ?eq|not ?in|in) )?( *)(\[[^\]]+\]|[^ ]+)?( *)",
        query_str)
    op_names = {
        ">=": "gte",
        ">": "gt",
        "gt": "gt",
        "gte": "gte",
        "<=": "lte",
        "<": "lt",
        "lt": "lt",
        "lte": "lte",
        "!=": "neq",
        "==": "eq",
        "=": "eq",
        "eq": "eq",
        "neq": "neq",
        "noteq": "neq",
        "not eq": "neq",
        "notin": "notin",
        "not in": "notin",
        "nin": "notin",
        "in": "in",
    }

    field_alias = {
        "cuda_vers": "cuda_max_good",
        "display_active": "gpu_display_active",
        "reliability": "reliability2",
        "dlperf_usd": "dlperf_per_dphtotal",
        "dph": "dph_total",
        "flops_usd": "flops_per_dphtotal",
    }

    field_multiplier = {
        "cpu_ram": 1000,
        "gpu_ram": 1000,
        "duration": 1.0 / (24.0 * 60.0 * 60.0),
    }

    fields = {
        "compute_cap",
        "cpu_cores",
        "cpu_cores_effective",
        "cpu_ram",
        "cuda_max_good",
        "driver_version",
        "disk_bw",
        "disk_space",
        "dlperf",
        "dlperf_per_dphtotal",
        "dph_total",
        "duration",
        "external",
        "flops_per_dphtotal",
        "gpu_display_active",
        # "gpu_ram_free_min",
        "gpu_mem_bw",
        "gpu_name",
        "gpu_ram",
        "has_avx",
        "host_id",
        "id",
        "inet_down",
        "inet_down_cost",
        "inet_up",
        "inet_up_cost",
        "min_bid",
        "mobo_name",
        "num_gpus",
        "pci_gen",
        "pcie_bw",
        "reliability2",
        "rentable",
        "rented",
        "storage_cost",
        "total_flops",
        "verified"
    }

    joined = "".join("".join(x) for x in opts)
    if joined != query_str:
        raise ValueError(
            "Unconsumed text. Did you forget to quote your query? " +
            repr(joined) + " != " + repr(query_str))
    for field, op, _, value, _ in opts:
        value = value.strip(",[]")
        v = res.setdefault(field, {})
        op = op.strip()
        op_name = op_names.get(op)

        if field in field_alias:
            field = field_alias[field]

        if field not in fields:
            print(
                "Warning: Unrecognized field: {}, see list of recognized fields."
                .format(field),
                file=sys.stderr)
        if not op_name:
            raise ValueError(
                "Unknown operator. Did you forget to quote your query? " +
                repr(op).strip("u"))
        if op_name in ["in", "notin"]:
            value = [x.strip() for x in value.split(",") if x.strip()]
        if not value:
            raise ValueError(
                "Value cannot be blank. Did you forget to quote your query? " +
                repr((field, op, value)))
        if not field:
            raise ValueError(
                "Field cannot be blank. Did you forget to quote your query? " +
                repr((field, op, value)))
        if value in ["?", "*", "any"]:
            if op_name != "eq":
                raise ValueError("Wildcard only makes sense with equals.")
            if field in v:
                del v[field]
            if field in res:
                del res[field]
            continue

        if field in field_multiplier:
            value = str(float(value) * field_multiplier[field])

        v[op_name] = value
        res[field] = v
    return res
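A sketch of the expected behavior; note that gpu_ram is scaled by 1000 via field_multiplier:

print(parse_query("gpu_ram>=8 num_gpus=2"))
# {'gpu_ram': {'gte': '8000.0'}, 'num_gpus': {'eq': '2'}}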
Example 33
    def from_dict(cls, model_dict: typing.Dict) -> typing.Type['Model']:
        """Creates a model from a dictionary.

        Parameters
        ----------
        model_dict :
            Dictionary containing the model.
        """

        model = cls()

        # iterate over items
        for name, attribute in list(model_dict.items()):

            # we determine if the item is known to the model by looking for
            # a setter with the same name.

            if hasattr(model, f'set_{name}'):

                # get the set function
                set = getattr(model, f'set_{name}')

                # we retrieve the actual class from the signature
                for label, item in attribute.items():
                    item_cls = set.__func__.__annotations__['item']
                    is_typed = hasattr(item_cls, "_glotaran_model_attribute_typed")
                    if isinstance(item, dict):
                        if is_typed:
                            if 'type' not in item:
                                raise Exception(f"Missing type for attribute '{name}'")
                            item_type = item['type']

                            if item_type not in item_cls._glotaran_model_attribute_types:
                                raise Exception(f"Unknown type '{item_type}' "
                                                f"for attribute '{name}'")
                            item_cls = \
                                item_cls._glotaran_model_attribute_types[item_type]
                        item['label'] = label
                        set(label, item_cls.from_dict(item))
                    elif isinstance(item, list):
                        if is_typed:
                            if len(item) < 2 and len(item) != 1:
                                raise Exception(f"Missing type for attribute '{name}'")
                            item_type = item[1] if len(item) != 1 and \
                                hasattr(item_cls, 'label') else item[0]

                            if item_type not in item_cls._glotaran_model_attribute_types:
                                raise Exception(f"Unknown type '{item_type}' "
                                                f"for attribute '{name}'")
                            item_cls = \
                                item_cls._glotaran_model_attribute_types[item_type]
                        item = [label] + item
                        set(label, item_cls.from_list(item))
                del model_dict[name]

            elif hasattr(model, f'add_{name}'):

                # get the set function
                add = getattr(model, f'add_{name}')

                # we retrieve the actual class from the signature
                for item in attribute:
                    item_cls = add.__func__.__annotations__['item']
                    is_typed = hasattr(item_cls, "_glotaran_model_attribute_typed")
                    if isinstance(item, dict):
                        if is_typed:
                            if 'type' not in item:
                                raise Exception(f"Missing type for attribute '{name}'")
                            item_type = item['type']

                            if item_type not in item_cls._glotaran_model_attribute_types:
                                raise Exception(f"Unknown type '{item_type}' "
                                                f"for attribute '{name}'")
                            item_cls = \
                                item_cls._glotaran_model_attribute_types[item_type]
                        add(item_cls.from_dict(item))
                    elif isinstance(item, list):
                        if is_typed:
                            if len(item) < 2 and len(item) != 1:
                                raise Exception(f"Missing type for attribute '{name}'")
                            item_type = item[1] if len(item) != 1 and \
                                hasattr(item_cls, 'label') else item[0]

                            if item_type not in item_cls._glotaran_model_attribute_types:
                                raise Exception(f"Unknown type '{item_type}' "
                                                f"for attribute '{name}'")
                            item_cls = \
                                item_cls._glotaran_model_attribute_types[item_type]
                        add(item_cls.from_list(item))
                del model_dict[name]

        return model
Example 34
    def from_dict(cls, init_dict: tg.Dict) -> 'MetaData':
        meta_dict = init_dict['meta_dict']
        init_args = init_dict.get('init_args', [])
        init_kwargs = init_dict.get('init_kwargs', {})

        return cls(meta_dict, *init_args, **init_kwargs)
Example 35
def synthesize_library(
    library: typing.Dict,
    github_token: str,
    extra_args: typing.List[str],
    base_log_path: pathlib.Path,
    runner: Runner = _execute,
) -> typing.Dict:
    """Run autosynth on a single library.

    Arguments:
        library {dict} - Library configuration

    """
    logger.info(f"Synthesizing {library['name']}.")

    command = [sys.executable, "-m", "autosynth.synth"]

    env = os.environ
    env["GITHUB_TOKEN"] = github_token

    library_args = [
        "--repository",
        library["repository"],
        "--synth-path",
        library.get("synth-path", ""),
        "--branch-suffix",
        library.get("branch-suffix", ""),
        "--pr-title",
        library.get("pr-title", ""),
        "--base-log-dir",
        str(base_log_path),
    ]

    if library.get("metadata-path"):
        library_args.extend(["--metadata-path", library.get("metadata-path")])

    if library.get("deprecated-execution", False):
        library_args.append("--deprecated-execution")

    log_file_dir = (pathlib.Path(base_log_path) / pathlib.Path(
        library.get("synth-path", "") or library["repository"]).name)
    log_file_path = log_file_dir / "sponge_log.log"
    # run autosynth in a separate process
    (returncode, output) = runner(
        command + library_args + library.get("args", []) + extra_args,
        env,
        log_file_path,
    )
    error = returncode not in (0, synth.EXIT_CODE_SKIPPED)
    skipped = returncode == synth.EXIT_CODE_SKIPPED
    # Leave a sponge_log.xml side-by-side with sponge_log.log, and sponge
    # will understand they're for the same task and render them accordingly.
    results = [{
        "name": library["name"],
        "error": error,
        "output": "See the test log.",
        "skipped": skipped,
    }]
    make_report(library["name"], results, log_file_dir)
    if error:
        logger.error(f"Synthesis failed for {library['name']}")
    return {
        "name": library["name"],
        "output": output.decode("utf-8"),
        "error": error,
        "skipped": skipped,
    }
Example 36
def matches(small: typing.Dict, group: typing.Dict):
    for key in small.keys():
        if key not in group or small[key] != group[key]:
            return False
    return True
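matches checks that small is a sub-dict of group:

print(matches({"role": "admin"}, {"role": "admin", "id": 7}))  # True
print(matches({"role": "admin"}, {"role": "user", "id": 7}))   # False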
Example 37
from typing import Dict, List, Union
import sqlite3

Book = Dict[str, Union[str, int]]

def func_that_has_no_return() -> None:  # annotating the return type means assigning this function's result to a variable will give a warning
    print('There is no return in this function')

def funct_that_returns_a_list_of_dict() -> List[Dict]:  # List and Dict exist in the typing package
    return [{'a':1, 'b':2}]

def function_with_param(name: str, age: int) -> int:
    if age > 100:
        return 1
    else:
        return 0

def db_connection() -> sqlite3.Connection: # we must return a Connection-typed value
    return sqlite3.connect(":memory:")  # connect() needs a database argument; ":memory:" here is illustrative

var1 = func_that_has_no_return()
print(var1)

print (funct_that_returns_a_list_of_dict())

print(function_with_param(4, 5)) # the first argument is highlighted with a warning: it is an int, not a str

connection1_var = db_connection()
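With the corrected Book alias, a return annotation can use it; a type checker such as mypy will flag mismatching values (the book below is illustrative):

def make_book() -> Book:
    return {'title': 'Sqlite basics', 'pages': 120}

print(make_book())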

Example 38
File: map.py Project: tek/tryp.py
 def keys_view(self):
     return Dict.keys(self)
Example 39
 def get_source(self, source_config: typing.Dict):
     cls = self._sources[source_config['name']]
     return cls(source_config.get('authentication_key', ""))
Example 40
def map_datacenter(document: typing.Dict) -> Datacenter:
    id = document.get('id')
    rooms = list(map(map_room, document['rooms']))
    return Datacenter(rooms=rooms, id=id)
Example 41
def stringify_dict(inp: typing.Dict):
    return {str(k): str(v) for k, v in inp.items()}
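A quick sketch: both keys and values are converted with str():

print(stringify_dict({1: True, 'x': None}))  # {'1': 'True', 'x': 'None'}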
Example 42
def map_room(document: typing.Dict) -> Room:
    id = document.get('id')
    type = RoomType[document.get('type', 'SERVER')]
    objects = list(map(map_room_object, document.get('objects', [])))
    return Room(objects=objects, type=type, id=id)