Example 1
    async def mset(self, mapping: ty.Dict = None, expire: int = 0, *args):
        # use None instead of a mutable default argument
        mapping = mapping if mapping is not None else {}
        assert len(mapping.keys()) + len(args) < 20, "Max keys for mset is 20!"
        if expire > 0:
            for k, v in mapping.items():
                await self.set(key=k, value=v, expire=expire)
        else:
            for k, v in mapping.items():
                args = args + (k, v)
            await self._rclient.mset(*args)
Example 2
    def generate(
        self, generation_specification: typing.Dict
    ) -> typing.List[collections.OrderedDict]:
        '''
        Takes a generation specification and returns a list of concrete
        specifications based on it. The format is as follows:

        {
            "a": [1, 2, 3],
            "b": [1, 2]
        }

        generates (keys are processed in sorted order and
        itertools.product varies the last key fastest):

        [
            {"a": 1, "b": 1},
            {"a": 1, "b": 2},
            {"a": 2, "b": 1},
            {"a": 2, "b": 2},
            {"a": 3, "b": 1},
            {"a": 3, "b": 2}
        ]

        If you want a key whose value is itself a list, wrap it in double
        brackets, i.e.

        {
            "a": [[1, 2, 3]]
        }
        '''
        generation_specification = collections.OrderedDict(
            sorted(generation_specification.items()))
        iterators = []
        for key, value in generation_specification.items():
            if isinstance(value, list):
                iterators.append(list(map(lambda x: (key, x), value)))
        specifications = []
        for updates in itertools.product(*iterators):
            cur_j = copy.deepcopy(generation_specification)
            for update_key, update_value in updates:
                cur_j[update_key] = update_value
            specifications.append(cur_j)
        return specifications
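
A minimal sketch of the same expansion idea using itertools.product directly (the names below are illustrative and not part of the original class):

import itertools

spec = {"a": [1, 2, 3], "b": [1, 2]}
keys = sorted(spec)
expanded = [dict(zip(keys, combo))
            for combo in itertools.product(*(spec[k] for k in keys))]
# [{'a': 1, 'b': 1}, {'a': 1, 'b': 2}, {'a': 2, 'b': 1}, ...]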
Example 3
    def aggregate_calculation_times(
            self, flat_trace: typing.Dict) -> typing.Dict[str, typing.Dict]:
        def _aggregate_calculations(calculations):
            calculation_count = len(calculations)
            calculation_time = sum(calculation[1]['calculation_time']
                                   for calculation in calculations)
            formula_time = sum(calculation[1]['formula_time']
                               for calculation in calculations)
            return {
                'calculation_count': calculation_count,
                'calculation_time': TraceNode.round(calculation_time),
                'formula_time': TraceNode.round(formula_time),
                'avg_calculation_time': TraceNode.round(calculation_time / calculation_count),
                'avg_formula_time': TraceNode.round(formula_time / calculation_count),
            }

        all_calculations = sorted(flat_trace.items())
        return {
            variable_name: _aggregate_calculations(list(calculations))
            for variable_name, calculations in itertools.groupby(
                all_calculations,
                lambda calculation: calculation[0].split('<')[0])
        }
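
The grouping step above buckets each calculation under the part of its name before '<'. A standalone sketch of that pattern, with made-up trace keys:

import itertools

flat = {"rent<2019-01>": 1, "rent<2019-02>": 2, "salary<2019-01>": 3}
groups = {
    name: [key for key, _ in grp]
    for name, grp in itertools.groupby(
        sorted(flat.items()), key=lambda kv: kv[0].split('<')[0])
}
# {'rent': ['rent<2019-01>', 'rent<2019-02>'], 'salary': ['salary<2019-01>']}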
Example 4
def fix_upper_case(header_dict: typing.Dict):
    output_dict = {}
    for key, value in header_dict.items():
        key = key.split('-')
        key = '-'.join([i.capitalize() for i in key])
        output_dict[key] = value
    return output_dict
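
A quick usage sketch (the header values are made up; only the keys are rewritten):

headers = {"content-type": "application/json", "x-request-id": "abc"}
print(fix_upper_case(headers))
# {'Content-Type': 'application/json', 'X-Request-Id': 'abc'}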
Example 5
    def convert_input(self, in_data: typing.Dict, **kwargs) -> typing.Dict:
        in_data["route_type"] = schema_utils.numbered_type_enum_key(
            in_data["route_type"])
        for key in ("route_desc", "route_fare_class"):
            in_data[key] = in_data[key].replace(" ", "_").lower()

        return {k: v for k, v in in_data.items() if v}
Example 6
def _shortest_path_to_json_format(
        src: str, single_source_shortest_paths: typing.Dict) -> typing.List:
    """
    convert to json format specified by coauthor.json
    :param src: shortest path source node
    :param single_source_shortest_paths: specified by `bfs` output, dict of shortest path to destination nodes, e.g.
    (node = "Susan") {"Bob": ["Bob", "Ken", "Alice", "Susan"], ...}
    :return: specified by coauthor.json, list of {"source": src, "target": dst,
    "node_path": [{"name": name}],
    "edge_path": [edge_0, edge_1, ..., edge_p]}, edge_i: {"source": src_i, "target": dst_i}
    """
    ret = list()
    for dst, path in single_source_shortest_paths.items():
        if src >= dst:
            continue
        node_path = [{"name": name} for name in path]
        edge_path = [{
            "source": path[i],
            "target": path[i + 1]
        } for i in range(len(path) - 1)]
        ret.append({
            "source": src,
            "target": dst,
            "node_path": node_path,
            "edge_path": edge_path
        })
    return ret
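
A small usage sketch (node names are invented; the `src >= dst` guard emits each undirected pair only once, so the destination must sort after the source):

paths = {"Bob": ["Alice", "Ken", "Bob"]}
print(_shortest_path_to_json_format("Alice", paths))
# [{'source': 'Alice', 'target': 'Bob',
#   'node_path': [{'name': 'Alice'}, {'name': 'Ken'}, {'name': 'Bob'}],
#   'edge_path': [{'source': 'Alice', 'target': 'Ken'}, {'source': 'Ken', 'target': 'Bob'}]}]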
Example 7
    def _serialize_dct(
        self,
        metadata: DictionaryMeta,
        values: typing.Dict,
    ) -> ET.Element:
        obj = ET.Element("Obj", RefId=self._get_obj_id())
        if not isinstance(metadata, DictionaryMeta):
            metadata = DictionaryMeta(name=metadata.name,
                                      optional=metadata.optional)
        self._create_tn(obj, metadata.dict_types)

        dct = ET.SubElement(obj, "DCT")

        # allow dicts to be defined as a tuple so that the order is kept
        iterator: typing.Iterable[typing.Tuple[typing.Any, typing.Any]]
        if isinstance(values, tuple):
            iterator = values
        else:
            iterator = values.items()

        for key, value in iterator:
            en = ET.SubElement(dct, "En")
            key_meta = copy(metadata.dict_key_meta)
            value_meta = copy(metadata.dict_value_meta)
            self.serialize(key, key_meta, parent=en, clear=False)
            self.serialize(value, value_meta, parent=en, clear=False)

        return obj
Example 8
    def info(self, kvpairs: typing.Dict) -> None:
        super().info(kvpairs)

        self.open()

        for (key, val) in sorted(kvpairs.items()):
            self.writer.add_text(key, str(val), 0)
Example 9
 def get_component_implicit_plugins(self, sub_dict: typing.Dict = None):
     """
     Retrieves plugin data from plugins attached to `presalytics.story.components`
     classes referenced in the `presalytics.story.outline.StoryOutline`
     """
     if not sub_dict:
         sub_dict = self.story_outline.to_dict()
     if sub_dict:
         for key, val in sub_dict.items():
             if key in ["widgets", "themes", "pages"]:
                 if isinstance(val, list):
                     for list_item in val:
                         if isinstance(list_item, dict):
                             if "kind" in list_item:
                                 class_key = key.rstrip("s") + "." + list_item["kind"]
                                 klass = presalytics.COMPONENTS.get(class_key)
                                 if klass:
                                     if len(klass.__plugins__) > 0:
                                         self.plugins.extend(klass.__plugins__)                           
             if isinstance(val, dict):
                 if len(val.keys()) > 0:
                     self.get_component_implicit_plugins(val)
             if isinstance(val, list):
                 for list_item in val:
                     if isinstance(list_item, dict):
                         self.get_component_implicit_plugins(list_item)
Example 10
 def update_outline_from_instances(self, sub_dict: typing.Dict = None):
     """
     If a component instance for the widget is available in `presalytics.COMPONENTS`, 
     this method find the instance and regenerates the component data 
     so the latest data is available during the renering process.
     """
     if not sub_dict:
         sub_dict = self.story_outline.to_dict()
     if sub_dict:
         for key, val in sub_dict.items():
             if key in ["widgets", "themes", "pages"]:
                 if isinstance(val, list):
                     for list_item in val:
                         if isinstance(list_item, dict):
                             if "kind" in list_item:
                                 class_key = key.rstrip("s") + "." + list_item["kind"]
                                 klass = presalytics.COMPONENTS.get(class_key)
                                 if klass:
                                     if "name" in list_item:
                                         instance_key = class_key + "." + list_item["name"]
                                         inst = presalytics.COMPONENTS.get_instance(instance_key)
                                         if inst:
                                             self._set_outline_data_from_instance(inst)
             if isinstance(val, dict):
                 if len(val.keys()) > 0:
                     self.update_outline_from_instances(val)
             if isinstance(val, list):
                 for list_item in val:
                     if isinstance(list_item, dict):
                         self.update_outline_from_instances(list_item)
Example 11
    def emit_cud_locally(cls, resource_name: str, payload: typing.Dict):
        """Performs a CUD action in the Model class that was previously
        subscribed to CUD changes using the same resource_name"""
        model_class: Model = cls.map_event_to_model_class.get(
            resource_name, None)
        if not model_class:
            # In case the service is subscribed to
            # the CUD event using an event handler
            cls.emit_locally(resource_name, payload)
            return

        # Remove field that's not part of the ObjectModel class
        object_id = payload['id']
        cud_operation = payload.pop('cud_operation')

        if cud_operation == CudEvent.DELETED:
            model_class.objects.filter(pk=object_id).delete()
        else:
            try:
                model_instance: ObjectModel = model_class.objects.get(
                    pk=object_id, )
            except model_class.DoesNotExist:
                model_instance: ObjectModel = model_class(pk=object_id)

            # This is the way that DRF uses for updating models
            for attr, value in payload.items():
                setattr(model_instance, attr, value)

            model_instance.save()
Example 12
    def post_project(
        self,
        extra_whitesource_config: typing.Dict,
        file,
        filename: str,
        project_name: str,
        requester_email: str,
    ):

        fields = {
            'component': (f'{filename}.tar', file, 'application/zip'),
            'wsConfig': json.dumps({
                'apiKey': self.api_key,
                'projectName': project_name,
                'productToken': self.product_token,
                'requesterEmail': requester_email,
                'userKey': self.creds.user_key(),
                'wssUrl': self.wss_endpoint,
                'extraWsConfig': extra_whitesource_config,
            })
        }

        # add extra whitesource config
        for key, value in extra_whitesource_config.items():
            fields[key] = value

        m = MultipartEncoder(fields=fields)
        return self.request(
            method='POST',
            url=self.routes.post_component(),
            headers={'Content-Type': m.content_type},
            data=m,
        )
Example 13
    def get_insert(self, table: str, data_dict: typing.Dict,
                   key_type_dict: typing.Dict) -> str:
        """
        获取插入语句
        :param data_dict: 数据字典
        :param key_type_dict: 要新增的数据字段及类型
        :param table: 表名
        :return:
        """
        _item_key_list = []  # 新增数据字段列表
        _item_value_list = []  # 新增数据字段对应数据列表
        # 要新增的数据字段及类型
        for _key, _type in key_type_dict.items():
            if _key not in data_dict:
                continue
            _str = self.item_data_2_str(data=data_dict[_key], value_type=_type)
            if _str is None or _str == 'None':
                continue
            _item_key_list.append(f"`{_key}`")  # 添加键
            _item_value_list.append(f"{_str}")

        # 拼接语句
        ret_sql = f"INSERT INTO {self._name_str(d=table)} ({', '.join(_item_key_list)}) VALUES ({', '.join(_item_value_list)})"
        # print(f"查询语句:{ret_sql}")  # 调试
        return ret_sql  # 返回
Example 14
def django_update(Model: django.db.models.Model, id: str, prev_updated: float,
                  input: ty.Dict, handlers: ty.Dict):
    response = {"error": False, "message": None, "node": None}
    try:
        inst = Model.objects.get(id=id)
        assert prev_updated == inst.updated.timestamp()
        for ik, iv in input.items():
            snake_ik = inflection.underscore(ik)
            if snake_ik in handlers:
                # look up the handler under the same snake_case key used in the membership test
                inst = handlers[snake_ik](iv)
            else:
                setattr(inst, snake_ik, iv)
        inst.save()
        return {**response, "node": inst}
    except Model.DoesNotExist:
        return {
            **response, "error": True,
            "message":
            f"An `{Model.__name__}` does not exist with the id `{id}`"
        }
    except Exception as err:
        return {
            **response, "error":
            True,
            "message":
            f"Updating {Model.__name__} with id<{id}> failed because: {err}"
        }
Example 15
def format_content(cves: typing.Dict):
    src_tpl = '    <li><font color="red">%(cnt)d</font> items provided by [<a href="%(url)s">%(src)s</a>]</li>'
    mail_tpl = """
<h3>Found <font color="red">%(total)d</font> new threat intelligence items:</h3>
<ul>
%(src_infos)s
</ul>
<h3>Detailed vulnerability list:</h3>
<br/>
%(cve_infos)s

<br/><br/>
++++++++++++++++++++++++++++++++++++++++++++++
<br/>
<font color="red">[Intelligence collection and broadcast support]</font> https://skactor.github.io/threat-intelligence/
"""
    src_infos = []
    cve_infos = []
    total = 0
    for source, _cves in cves.items():
        cnt = len(_cves)
        total += cnt
        src_infos.append(
            src_tpl % {"cnt": cnt, "url": source.home_page, "src": source.name_ch}
        )
        list(map(lambda cve: cve_infos.append(cve.to_html()), _cves))

    content = mail_tpl % {
        "total": total,
        "src_infos": "\n".join(src_infos),
        "cve_infos": "\n".join(cve_infos),
    }
    return content
Example 16
    def get_plugins_from_nested_dict(
        source_dict: typing.Dict,
        plugin_list: typing.List[typing.Dict] = None
    ) -> typing.List[typing.Dict]:
        if not plugin_list:
            plugin_list = []

        for key, val in source_dict.items():
            if key == "plugins":
                if isinstance(val, list):
                    for list_item in val:
                        if isinstance(list_item, dict):
                            if "config" in list_item and "name" in list_item and "kind" in list_item:
                                plugin_list.append(list_item)
                continue
            if isinstance(val, dict):
                # recurse into a fresh list; passing plugin_list down and then
                # extending it with the return value would duplicate entries
                plugin_list.extend(
                    PluginManager.get_plugins_from_nested_dict(val))
            if isinstance(val, list):
                for list_item in val:
                    if isinstance(list_item, dict):
                        plugin_list.extend(
                            PluginManager.get_plugins_from_nested_dict(list_item))
        return plugin_list
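
A hedged usage sketch (the outline structure below is invented; it just needs nested dicts/lists with "plugins" entries):

outline = {
    "pages": [{"plugins": [{"kind": "style", "name": "x", "config": {}}]}],
    "plugins": [{"kind": "script", "name": "y", "config": {}}],
}
found = PluginManager.get_plugins_from_nested_dict(outline)
# found holds the "style" and "script" plugin entries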
Example 17
def print_failures(errors: typing.Dict, parse_context):
    """Print the failures (as listed in `errors`) that occurred in all the tests

    `parse_context`: a function that takes some locals() dict and mangles it into a list of key, value tuples.
    (Changing the key names in the process)
    """
    if not errors:
        # Good job! No failures to print.
        return

    print('-' * 50)
    print(f'{"Failure details ":-^50}')
    for test_function, error in errors.items():
        print('-' * 50)
        print(
            f'Test {test_function.__name__} failed with the following stacktrace:'
        )
        # first argument is exception type, now inferred from second arg and ignored. Signature seems to be legacy.
        exception_tb = '\n'.join(
            traceback.format_exception(None, error, error.__traceback__))
        print_indent_lines(exception_tb, indent=2)
        # We use the traceback module to walk the traceback stack and give us a list of frame summaries.
        stack_summary = traceback.StackSummary.extract(traceback.walk_tb(
            error.__traceback__),
                                                       capture_locals=True)
        # The info we want is in the last frame (the one containing the `assert`)
        variables_of_interest = stack_summary[-1].locals
        variables_of_interest = parse_context(variables_of_interest)
        debug_str = '\n'.join([f'{k}={v}' for k, v in variables_of_interest])
        print('Where: ')
        print_indent_lines(debug_str, indent=2)
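
The capture_locals trick above is plain stdlib; here is a minimal standalone sketch of it, with an illustrative parse_context rather than the one from the original test suite:

import traceback

def parse_context(local_vars):
    # illustrative: return locals as (name, repr-string) pairs
    return [(k, v) for k, v in (local_vars or {}).items()]

def failing_test():
    expected = 2
    assert 1 == expected

try:
    failing_test()
except AssertionError as err:
    summary = traceback.StackSummary.extract(
        traceback.walk_tb(err.__traceback__), capture_locals=True)
    print(parse_context(summary[-1].locals))  # [('expected', '2')]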
Example 18
    def build_headers(self, custom_headers: typing.Dict = None) -> typing.Dict:
        # merge the default headers with per-request overrides;
        # use None instead of a mutable default argument
        n_dict = {}
        for k, v in self.default_headers.items():
            n_dict[k] = v
        for k, v in (custom_headers or {}).items():
            n_dict[k] = v
        return n_dict
Example 19
    def convert_input(self, in_data: typing.Dict, **kwargs) -> typing.Dict:
        in_data["service_schedule_type"] = in_data["service_schedule_type"].lower()
        in_data["service_schedule_typicality"] = schema_utils.numbered_type_enum_key(
            in_data["service_schedule_typicality"], default_0=True)
        return {k: v for k, v in in_data.items() if v}
Example 20
def angstrom_to_bohr(lattice_parameters: typing.Dict,
                     precision=6) -> typing.Dict:
    """
    Convert lattice constant entries in lattice parameter dictionary
    to bohr, from angstrom.

    Parameters
    ----------
    lattice_parameters : dict
        Lattice constants and angles.
        Each dictionary value has a float value and str unit.
        Valid keys = a, b, c, alpha, beta, gamma
    precision : int, optional
        Number of decimal places to round constants to

    Returns
    -------
    Lattice_parameters : dict
        Lattice constants and angles.
        (a,b,c) will be in bohr.

    """

    ang_to_bohr = unit_conversions.angstrom_to_bohr
    for key, parameter in lattice_parameters.items():
        if parameter.unit == 'angstrom':
            new_value = round(parameter.value * ang_to_bohr, precision)
            lattice_parameters[key] = Set(new_value, 'bohr')
    return lattice_parameters
Example 21
def ensure_bson(documents: typing.List[typing.Dict],
                mapping: typing.Dict) -> typing.List:
    for document in documents:
        for key, func in mapping.items():
            if key in document:
                document[key] = func(document[key])
    return documents
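
A quick usage sketch (the converter mapping here is made up; in its original context the mapping presumably coerces values into BSON-friendly types):

docs = [{"count": "3", "name": "a"}, {"name": "b"}]
print(ensure_bson(docs, {"count": int}))
# [{'count': 3, 'name': 'a'}, {'name': 'b'}]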
Example 22
def validate_simple_dict(dict_: typing.Dict) -> None:
    """
    A simple dict validator with:
    key as 0-9a-zA-Z-_. based string
    value as either: string, int, bool, float types
    """
    for key in dict_.keys():
        if not isinstance(key, str):
            raise ValidationError(
                'Dictionary key "{}" is not a string'.format(key))
        regex_validator = Regexp(regex=(re.compile("^[0-9a-zA-Z-_.]+$")))
        try:
            regex_validator(key)
        except ValidationError as exc:
            raise ValidationError('Dictionary key "{}" incorrect : {}'.format(
                key, str(exc))) from exc

    # INFO - G.M - We assume float is the type used for float conversion,
    # this may change depending
    # on how the json parser is configured.
    float_type = float
    invalid_key_value_pairs = [
        (key, value) for key, value in dict_.items()
        if not isinstance(value, (str, int, bool, float_type, type(None)))
    ]
    if invalid_key_value_pairs:
        raise ValidationError(
            "Only string/number/null values are allowed as dictionary value. Invalid values: {}"
            .format(invalid_key_value_pairs))
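
A usage sketch; it assumes ValidationError is marshmallow's, which is what the Regexp validator in the snippet points to:

validate_simple_dict({"retry-count": 3, "enabled": True})  # passes silently

try:
    validate_simple_dict({"nested": {"a": 1}})  # dict values are rejected
except ValidationError as exc:
    print(exc)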
Example 23
 async def resolve_update(_, info, id: str, input: ty.Dict):
     response = {
         "error": False,
         "message": "Create Successfull!",
         "node": None
     }
     try:
         inst = User.objects.get(id=id)
         if "password" in input:
             password = input.pop("password")
             inst.password = generate_hashed_password(password)
         for k, v in input.items():
             setattr(inst, k, v)
         inst.save()
         return {**response, "node": inst}
     except User.DoesNotExist:
         return {
             **response, "error": True,
             "message": f"The object with id {id} could not be found."
         }
     except Exception as err:
         return {
             **response, "error": True,
             "message": f"Create Error: {err}"
         }
Example 24
 def config_to_camelCase(self, config: typing.Dict) -> typing.Dict:
     new_dict = dict()
     for key, val in config.items():
         new_dict.update({
             presalytics.story.util.to_camel_case(key): val
         })
     return new_dict
Example 25
 def create(self, account: Account, skills: typing.Dict) -> HighScore:
     skills = {
         name: Skill(**skill_dict)
         for name, skill_dict in skills.items()
     }
     highscore = HighScore(account.id, **skills)
     return self.highscore_repository.create(highscore)
Example 26
def apiurl(args: argparse.Namespace,
           subpath: str,
           query_args: typing.Dict = None) -> str:
    """Creates the endpoint URL for a given combination of parameters.

    :param argparse.Namespace args: Namespace with many fields relevant to the endpoint.
    :param str subpath: added to end of URL to further specify endpoint.
    :param typing.Dict query_args: specifics such as API key and search parameters that complete the URL.
    :rtype str:
    """
    if query_args is None:
        query_args = {}
    if args.api_key is not None:
        query_args["api_key"] = args.api_key
    if query_args:
        # a_list      = [<expression> for <l-expression> in <expression>]
        '''
        vector result;
        for (l_expression: expression) {
            result.push_back(expression);
        }
        '''
        # an_iterator = (<expression> for <l-expression> in <expression>)
        return args.url + subpath + "?" + "&".join(
            "{x}={y}".format(
                x=x, y=quote_plus(y if isinstance(y, str) else json.dumps(y)))
            for x, y in query_args.items())
    else:
        return args.url + subpath
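
A usage sketch (the URL, key, and query parameters are invented; the function relies on json and urllib.parse.quote_plus from its own module):

import argparse

args = argparse.Namespace(url="https://api.example.com", api_key="secret")
print(apiurl(args, "/search", {"q": "spam", "limit": 5}))
# https://api.example.com/search?q=spam&limit=5&api_key=secret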
Example 27
 def subdict_to_string(dict_: typing.Dict) -> str:
     """Converts a style dict into a css string."""
     str_ = '{'
     for attr, value in dict_.items():
         str_ += f'{attr}:{value};'
     str_ += '}'
     return str_
Example 28
def generate_css_from_dict(css_dict: typing.Dict) -> str:
    """Returns a css string from a dictionary.

    Example input:

    {
        'p': {
            'font-size': '1em',
            'color': 'blue',
        },
        'h1': {
            'font-size': '2em',
            'color': 'red',
        },
    }
    """

    def subdict_to_string(dict_: typing.Dict) -> str:
        """Converts a style dict into a css string."""
        str_ = '{'
        for attr, value in dict_.items():
            str_ += f'{attr}:{value};'
        str_ += '}'
        return str_

    str_ = ''
    for elem, styles in css_dict.items():
        style_string = subdict_to_string(styles)
        str_ += f'{elem}{style_string}'
    return str_
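
A quick usage sketch using the docstring's own example input:

print(generate_css_from_dict({
    'p': {'font-size': '1em', 'color': 'blue'},
    'h1': {'font-size': '2em', 'color': 'red'},
}))
# p{font-size:1em;color:blue;}h1{font-size:2em;color:red;}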
Example 29
def test_load_good_data(checkpoint_data: typing.Dict):
    # GIVEN
    checkpoint_obj = schemas.CheckpointSchema().load(checkpoint_data)

    # THEN
    assert isinstance(checkpoint_obj, mbta_models.Checkpoint)
    for key, value in checkpoint_data.items():
        assert getattr(checkpoint_obj, key) == value
Example 30
def dict2attr(dict_state: t.Dict) -> parts.AttributeDict:
    state = parts.AttributeDict()

    for k, v in dict_state.items():
        state[k] = v

    return state
Example 31
    def storeProductIndex(self, _product_dict: typing.Dict):
        for product, v in _product_dict.items():
            # for each product, compute index of product
            tmp_data_list = []
            for instrument in v['InstrumentList']:
                self.instrument_day_data_cur.execute(
                    "SELECT openprice, highprice, lowprice, closeprice, "
                    "volume, openinterest FROM {} WHERE tradingday='{}'".format(
                        instrument, v['TradingDay']
                    )
                )
                values = self.instrument_day_data_cur.fetchone()
                tmp_data_list.append(dict(zip(
                    INDEX_TABLE_KEYS[1:], values
                )))

            openprice_arr = self._get_arr(tmp_data_list, 'OpenPrice')
            highprice_arr = self._get_arr(tmp_data_list, 'HighPrice')
            lowprice_arr = self._get_arr(tmp_data_list, 'LowPrice')
            closeprice_arr = self._get_arr(tmp_data_list, 'ClosePrice')
            volume_arr = self._get_arr(tmp_data_list, 'Volume')
            openinterest_arr = self._get_arr(tmp_data_list, 'OpenInterest')

            total_openinterest = openinterest_arr.sum()
            if total_openinterest == 0:
                index_dict = {
                    'TradingDay': v['TradingDay'],
                    'OpenPrice': openprice_arr.mean(),
                    'HighPrice': highprice_arr.mean(),
                    'LowPrice': lowprice_arr.mean(),
                    'ClosePrice': closeprice_arr.mean(),
                    'Volume': int(volume_arr.sum()),
                    'OpenInterest': total_openinterest,
                }
            else:
                tmp_rate = openinterest_arr / float(total_openinterest)
                index_dict = {
                    'TradingDay': v['TradingDay'],
                    'OpenPrice': np.sum(tmp_rate * openprice_arr),
                    'HighPrice': np.sum(tmp_rate * highprice_arr),
                    'LowPrice': np.sum(tmp_rate * lowprice_arr),
                    'ClosePrice': np.sum(tmp_rate * closeprice_arr),
                    'Volume': int(volume_arr.sum()),
                    'OpenInterest': total_openinterest
                }
            # true store part
            try:
                self._store_product_index(product, index_dict)
            except psycopg2.DatabaseError as e:
                self.product_index_con.rollback()
                if e.pgcode == '42P01':
                    logging.warning(e)
                    self._create_product_index_table(product)
                    self._store_product_index(product, index_dict)
                else:
                    logging.error(e)
                    sys.exit(1)
Example 32
    def storeDominantIndex(self, _product_dict: typing.Dict):
        for product, v in _product_dict.items():
            # for each product, compute index of dominant
            dominant = v['Dominant']
            self.instrument_day_data_cur.execute(  # get the dominant data
                "SELECT openprice, highprice, lowprice, closeprice, "
                "volume, openinterest FROM {} WHERE tradingday='{}'".format(
                    dominant, v['TradingDay']
                )
            )
            values = self.instrument_day_data_cur.fetchone()
            cur_data = dict(zip(INDEX_TABLE_KEYS[1:], values))
            self.instrument_day_data_cur.execute(  # get the closeprice of last data
                "SELECT closeprice FROM {} WHERE tradingday<'{}' "
                "ORDER BY tradingday DESC LIMIT 1".format(
                    dominant, v['TradingDay']
                )
            )
            values = self.instrument_day_data_cur.fetchone()
            if values is not None:
                return_rate = cur_data['ClosePrice'] / values[0]
            else:
                return_rate = 1.0

            last_index_price = self._get_last_dominant_index(
                product, v['TradingDay']
            )
            if last_index_price is None:
                last_index_price = 1000.0
            new_index_price = last_index_price * return_rate
            price_scale = new_index_price / cur_data['ClosePrice']
            new_openprice = cur_data['OpenPrice'] * price_scale
            new_highprice = cur_data['HighPrice'] * price_scale
            new_lowprice = cur_data['LowPrice'] * price_scale

            self.dominant_index_cur.execute(
                "INSERT INTO {} VALUES "
                "(%s, %s, %s, %s, %s, %s, %s) "
                "ON CONFLICT (TradingDay) DO UPDATE SET "
                "OpenPrice = EXCLUDED.OpenPrice,"
                "HighPrice = EXCLUDED.HighPrice,"
                "LowPrice = EXCLUDED.LowPrice,"
                "ClosePrice = EXCLUDED.ClosePrice,"
                "Volume = EXCLUDED.Volume,"
                "OpenInterest = EXCLUDED.OpenInterest"
                "".format(product),
                [
                    v['TradingDay'],
                    new_openprice, new_highprice,
                    new_lowprice, new_index_price,
                    cur_data['Volume'], cur_data['OpenInterest']
                ]
            )
            self.dominant_index_con.commit()
Example 33
 def _check_for_invalid_ids(cls, multi_ids_dict: typing.Dict, entity_type: str):
     check_result = CheckResult(check_name=CHECK_NAMES.check_valid_ids, error_message=[])
     if not multi_ids_dict:
         check_result.result = RESULT.FAILURE
         check_result.error_message.append("No ids found.")
     for k, values in multi_ids_dict.items():
         wrong_ids = [id for id in values if not cls._is_id_valid(id)]
         if wrong_ids:
             check_result.error_message.append("Invalid " + str(k) + "(s) for " + str(entity_type) + ": " + str(wrong_ids))
             check_result.result = RESULT.FAILURE
     return check_result
Example 34
 def storeInstrumentDayData(self, _data_dict: typing.Dict):
     for instrument, v in _data_dict.items():
         try:
             self._store_instrument_day_data(instrument, v)
         except psycopg2.DatabaseError as e:
             self.instrument_day_data_con.rollback()
             if e.pgcode == '42P01':
                 logging.warning(e)
                 self._create_instrument_day_data_table(instrument)
                 self._store_instrument_day_data(instrument, v)
             else:
                 logging.error(e)
                 sys.exit(1)
Example 35
def init_validator(hints: typing.Dict, parent: typing.Optional[Validator]=None) -> Validator:
    """
    Returns a new validator instance from a given dictionary of type hints
    """
    validator = Validator(parent)

    for name, hint in hints.items():
        if hint is None:
            hint = type(None)

        root_parser = get_parser(None, hint, validator)
        syntax_tree = visit(root_parser)

        validator.roots[name] = syntax_tree

    return validator
Example 36
    def from_dict(cls, model_dict: typing.Dict) -> 'Model':
        """Creates a model from a dictionary.

        Parameters
        ----------
        model_dict :
            Dictionary containing the model.
        """

        model = cls()

        # iterate over items
        for name, attribute in list(model_dict.items()):

            # we determine if the item is known to the model by looking for
            # a setter with the same name.

            if hasattr(model, f'set_{name}'):

                # get the set function
                set = getattr(model, f'set_{name}')

                # we retrieve the actual class from the signature
                for label, item in attribute.items():
                    item_cls = set.__func__.__annotations__['item']
                    is_typed = hasattr(item_cls, "_glotaran_model_attribute_typed")
                    if isinstance(item, dict):
                        if is_typed:
                            if 'type' not in item:
                                raise Exception(f"Missing type for attribute '{name}'")
                            item_type = item['type']

                            if item_type not in item_cls._glotaran_model_attribute_types:
                                raise Exception(f"Unknown type '{item_type}' "
                                                f"for attribute '{name}'")
                            item_cls = \
                                item_cls._glotaran_model_attribute_types[item_type]
                        item['label'] = label
                        set(label, item_cls.from_dict(item))
                    elif isinstance(item, list):
                        if is_typed:
                            if len(item) < 2 and len(item) != 1:
                                raise Exception(f"Missing type for attribute '{name}'")
                            item_type = item[1] if len(item) != 1 and \
                                hasattr(item_cls, 'label') else item[0]

                            if item_type not in item_cls._glotaran_model_attribute_types:
                                raise Exception(f"Unknown type '{item_type}' "
                                                f"for attribute '{name}'")
                            item_cls = \
                                item_cls._glotaran_model_attribute_types[item_type]
                        item = [label] + item
                        set(label, item_cls.from_list(item))
                del model_dict[name]

            elif hasattr(model, f'add_{name}'):

                # get the add function
                add = getattr(model, f'add_{name}')

                # we retrieve the actual class from the signature
                for item in attribute:
                    item_cls = add.__func__.__annotations__['item']
                    is_typed = hasattr(item_cls, "_glotaran_model_attribute_typed")
                    if isinstance(item, dict):
                        if is_typed:
                            if 'type' not in item:
                                raise Exception(f"Missing type for attribute '{name}'")
                            item_type = item['type']

                            if item_type not in item_cls._glotaran_model_attribute_types:
                                raise Exception(f"Unknown type '{item_type}' "
                                                f"for attribute '{name}'")
                            item_cls = \
                                item_cls._glotaran_model_attribute_types[item_type]
                        add(item_cls.from_dict(item))
                    elif isinstance(item, list):
                        if is_typed:
                            if len(item) < 2 and len(item) != 1:
                                raise Exception(f"Missing type for attribute '{name}'")
                            item_type = item[1] if len(item) != 1 and \
                                hasattr(item_cls, 'label') else item[0]

                            if item_type not in item_cls._glotaran_model_attribute_types:
                                raise Exception(f"Unknown type '{item_type}' "
                                                f"for attribute '{name}'")
                            item_cls = \
                                item_cls._glotaran_model_attribute_types[item_type]
                        add(item_cls.from_list(item))
                del model_dict[name]

        return model