Example #1
    def __reduce__(self):
        'Return state information for pickling'
        inst_dict = vars(self).copy()
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        return self.__class__, (), inst_dict or None, None, iter(self.items())
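This matches the __reduce__ of the pure-Python OrderedDict in CPython's collections module; it lets subclasses round-trip through pickle even when they carry extra instance attributes. A minimal sketch of what that contract buys you (the subclass name MyOD is invented for illustration):

import pickle
from collections import OrderedDict

class MyOD(OrderedDict):
    pass

od = MyOD([('a', 1), ('b', 2)])
od.extra = 'metadata'                      # survives pickling via inst_dict
restored = pickle.loads(pickle.dumps(od))
assert isinstance(restored, MyOD)
assert list(restored.items()) == [('a', 1), ('b', 2)]
assert restored.extra == 'metadata'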
Example #2
def escape(pattern):
    """
    Escape special characters in a string.
    """
    if isinstance(pattern, str):
        return pattern.translate(_special_chars_map)
    else:
        pattern = str(pattern, 'latin1')
        return pattern.translate(_special_chars_map).encode('latin1')


Pattern = type(sre_compile.compile('', 0))
Match = type(sre_compile.compile('', 0).match(''))

# --------------------------------------------------------------------
# internals

_cache = OrderedDict()

_MAXCACHE = 512


def _compile(pattern, flags):
    # internal: compile pattern
    if isinstance(flags, RegexFlag):
        flags = flags.value
    try:
        return _cache[type(pattern), pattern, flags]
    except KeyError:
        pass
    if isinstance(pattern, Pattern):
        if flags:
            raise ValueError(
                "cannot process flags argument with a compiled pattern")
        return pattern
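The module keeps compiled patterns in the _cache OrderedDict above and, once the cache is full, discards the oldest entry with popitem(last=False). A minimal sketch of that bounded-cache pattern, using re.compile as the expensive step (cached_compile and _demo_cache are invented names, not the stdlib's exact code):

import re
from collections import OrderedDict

_demo_cache = OrderedDict()
_DEMO_MAXCACHE = 512

def cached_compile(pattern, flags=0):
    key = (type(pattern), pattern, flags)
    try:
        return _demo_cache[key]           # cache hit: skip recompilation
    except KeyError:
        pass
    compiled = re.compile(pattern, flags)
    if len(_demo_cache) >= _DEMO_MAXCACHE:
        _demo_cache.popitem(last=False)   # evict the oldest entry (FIFO)
    _demo_cache[key] = compiled
    return compiled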
Example #3
# -*- coding: utf-8 -*-
"""
@author: Li Luyao
"""

from collections import OrderedDict
import json

d = OrderedDict()
d['foo'] = 1
d['bar'] = 2
d['spam'] = 3
d['grok'] = 4

for key in d:  # for key in d.keys()
    print(key, d[key])

# An OrderedDict gives precise control over field order when encoding to JSON
json.dumps(d)  # Outputs '{"foo": 1, "bar": 2, "spam": 3, "grok": 4}'
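Since json.dumps serializes keys in iteration order, reordering the OrderedDict reorders the JSON output as well; a quick sketch:

d.move_to_end('foo')
json.dumps(d)  # '{"bar": 2, "spam": 3, "grok": 4, "foo": 1}'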
Example #4
    def __new__(metacls, cls, bases, classdict):
        # an Enum class is final once enumeration items have been defined; it
        # cannot be mixed with other types (int, float, etc.) if it has an
        # inherited __new__ unless a new __new__ is defined (or the resulting
        # class will fail).
        member_type, first_enum = metacls._get_mixins_(bases)
        __new__, save_new, use_args = metacls._find_new_(
            classdict, member_type, first_enum)

        # save enum items into separate mapping so they don't get baked into
        # the new class
        enum_members = {k: classdict[k] for k in classdict._member_names}
        for name in classdict._member_names:
            del classdict[name]

        # adjust the sunders
        _order_ = classdict.pop("_order_", None)

        # check for illegal enum names (any others?)
        invalid_names = set(enum_members) & {
            "mro",
        }
        if invalid_names:
            raise ValueError("Invalid enum member name: {0}".format(
                ",".join(invalid_names)))

        # create a default docstring if one has not been provided
        if "__doc__" not in classdict:
            classdict["__doc__"] = "An enumeration."

        # create our new Enum type
        enum_class = super().__new__(metacls, cls, bases, classdict)
        enum_class._member_names_ = []  # names in definition order
        enum_class._member_map_ = OrderedDict()  # name->value map
        enum_class._member_type_ = member_type

        # save attributes from super classes so we know if we can take
        # the shortcut of storing members in the class dict
        base_attributes = {a for b in enum_class.mro() for a in b.__dict__}

        # Reverse value->name map for hashable values.
        enum_class._value2member_map_ = {}

        # If a custom type is mixed into the Enum, and it does not know how
        # to pickle itself, pickle.dumps will succeed but pickle.loads will
        # fail.  Rather than have the error show up later and possibly far
        # from the source, sabotage the pickle protocol for this class so
        # that pickle.dumps also fails.
        #
        # However, if the new class implements its own __reduce_ex__, do not
        # sabotage -- it's on them to make sure it works correctly.  We use
        # __reduce_ex__ instead of any of the others as it is preferred by
        # pickle over __reduce__, and it handles all pickle protocols.
        if "__reduce_ex__" not in classdict:
            if member_type is not object:
                methods = (
                    "__getnewargs_ex__",
                    "__getnewargs__",
                    "__reduce_ex__",
                    "__reduce__",
                )
                if not any(m in member_type.__dict__ for m in methods):
                    _make_class_unpicklable(enum_class)

        # instantiate them, checking for duplicates as we go
        # we instantiate first instead of checking for duplicates first in case
        # a custom __new__ is doing something funky with the values -- such as
        # auto-numbering ;)
        for member_name in classdict._member_names:
            value = enum_members[member_name]
            if not isinstance(value, tuple):
                args = (value, )
            else:
                args = value
            if member_type is tuple:  # special case for tuple enums
                args = (args, )  # wrap it one more time
            if not use_args:
                enum_member = __new__(enum_class)
                if not hasattr(enum_member, "_value_"):
                    enum_member._value_ = value
            else:
                enum_member = __new__(enum_class, *args)
                if not hasattr(enum_member, "_value_"):
                    if member_type is object:
                        enum_member._value_ = value
                    else:
                        enum_member._value_ = member_type(*args)
            value = enum_member._value_
            enum_member._name_ = member_name
            enum_member.__objclass__ = enum_class
            enum_member.__init__(*args)
            # If another member with the same value was already defined, the
            # new member becomes an alias to the existing one.
            for name, canonical_member in enum_class._member_map_.items():
                if canonical_member._value_ == enum_member._value_:
                    enum_member = canonical_member
                    break
            else:
                # Aliases don't appear in member names (only in __members__).
                enum_class._member_names_.append(member_name)
            # performance boost for any member that would not shadow
            # a DynamicClassAttribute
            if member_name not in base_attributes:
                setattr(enum_class, member_name, enum_member)
            # now add to _member_map_
            enum_class._member_map_[member_name] = enum_member
            try:
                # This may fail if value is not hashable. We can't add the value
                # to the map, and by-value lookups for this value will be
                # linear.
                enum_class._value2member_map_[value] = enum_member
            except TypeError:
                pass

        # double check that repr and friends are not the mixin's or various
        # things break (such as pickle)
        for name in ("__repr__", "__str__", "__format__", "__reduce_ex__"):
            class_method = getattr(enum_class, name)
            obj_method = getattr(member_type, name, None)
            enum_method = getattr(first_enum, name, None)
            if obj_method is not None and obj_method is class_method:
                setattr(enum_class, name, enum_method)

        # replace any other __new__ with our own (as long as Enum is not None,
        # anyway) -- again, this is to support pickle
        if Enum is not None:
            # if the user defined their own __new__, save it before it gets
            # clobbered in case they subclass later
            if save_new:
                enum_class.__new_member__ = __new__
            enum_class.__new__ = Enum.__new__

        # py3 support for definition order (helps keep py2/py3 code in sync)
        if _order_ is not None:
            if isinstance(_order_, str):
                _order_ = _order_.replace(",", " ").split()
            if _order_ != enum_class._member_names_:
                raise TypeError("member order does not match _order_")

        return enum_class
Example #5
File: utilities.py Project: paepcke/birds
    def find_class_paths(cls, data_root):
        '''
        Given a root directory, return an OrderedDict
        mapping class names to lists of directories
        that contain at least one sample of that class. 
        
        Both the class names (i.e. keys), and the
        lists of directories (i.e. values) will be
        naturally sorted, and absolute.
        
        Directory names that begin with a period ('.')
        are excluded.

        The assumption is that the names of the last
        path element of the returned directories are 
        class names. This assumption is often made in 
        torchvision packages.  

        :param data_root: root directory for search 
        :type data_root: str
        :return dict mapping target classes to a list
            of directories that contain samples of that
            class. The directories will be Path objs
        :rtype: OrderedDict{str: [Path]}
        '''

        # If path is relative, compute abs
        # path relative to current dir:
        if not os.path.isabs(data_root):
            data_root = os.path.join(os.path.dirname(__file__), data_root)
        class_paths = set()
        for root, _dirs, files in os.walk(data_root):

            if len(files) == 0:
                # Found only directories:
                continue

            # For convenience, turn the file paths
            # into Path objects:
            file_Paths = [Path(name) for name in files]
            root_Path = Path(root)

            # Pick out files with an image extension:
            full_paths = []
            for file_path in file_Paths:
                if file_path.suffix in cls.IMG_EXTENSIONS \
                   and not file_path.parent.stem.startswith('.'):
                    full_paths.append(
                        Path.joinpath(root_Path, file_path).parent)

            # Using union in this loop guarantees
            # uniqueness of the gathered class names:

            class_paths = class_paths.union(set(full_paths))

        # Order the paths so that all machines
        # have the same sample-id assignments later:

        class_paths = natsort.natsorted(list(class_paths))

        # Get dict {class-name : [paths-to-samples-of-that-class]}
        class_path_dict = OrderedDict()

        for class_path in class_paths:
            try:
                # dict[class-name] gets more
                # paths that hold samples of class-name:
                class_path_dict[class_path.stem].append(class_path)
            except KeyError:
                class_path_dict[class_path.stem] = [class_path]

        # Now ensure that the list of directories,
        # (i.e. the values) for each class-name's entry
        # are also sorted:

        # Use copy of class_path_dict for iteration,
        # b/c we modify class_path_dict in the loop:

        class_path_dict_copy = class_path_dict.copy()
        for class_name, dirs in class_path_dict_copy.items():
            class_path_dict[class_name] = natsort.natsorted(dirs)

        return class_path_dict
Example #6
#########   negative_dates  ##########

for xx in negative_dates:
    
    try:
        
        file1 = open(xx + ".txt", encoding="utf8")
        print(xx)

        line = file1.read()
        # collapse whitespace
        line = " ".join(line.split())
        # strip numbers
        line = re.sub(r'\d+', '', line)
        # drop duplicate words (fromkeys keeps first-occurrence order)
        line_modified = OrderedDict.fromkeys(line.split())
        line = ' '.join(line_modified)

        # strip special symbols
        for i in bad_chars:
            line = line.replace(i, '')

        words = line.split()

        for r in words:
            if r not in stop_words:
                appendFile = open('cleaned_negative/' + xx + '.txt', 'a', encoding="utf8")
Example #7
    def get_products(self):
        mydb = mysql.connector.connect(host='localhost',
                                       user='******',
                                       passwd='Livingstone2#',
                                       database='pos')
        mycursor = mydb.cursor()
        _stocks = OrderedDict()
        _stocks['product_code'] = {}
        _stocks['product_name'] = {}
        _stocks['product_weight'] = {}
        _stocks['in_stock'] = {}
        _stocks['sold_items'] = {}
        _stocks['order_date'] = {}
        _stocks['last_purchase'] = {}

        product_code = []
        product_name = []
        product_weight = []
        in_stock = []
        sold_items = []
        order_date = []
        last_purchase = []

        sql = 'SELECT * FROM stocks'
        mycursor.execute(sql)
        products = mycursor.fetchall()
        for product in products:
            product_code.append(product[0])
            name = product[1]
            if len(name) > 10:
                name = name[:10] + '...'
            product_name.append(name)
            product_weight.append(product[2])
            in_stock.append(product[3])
            try:
                sold_items.append(product[4])
            except IndexError:  # tuple indexing raises IndexError, not KeyError
                sold_items.append('')
            try:
                order_date.append(product[5])
            except IndexError:
                order_date.append('')
            try:
                last_purchase.append(product[6])
            except IndexError:
                last_purchase.append('')

        products_length = len(product_code)
        idx = 0
        while idx < products_length:
            _stocks['product_code'][idx] = product_code[idx]
            _stocks['product_name'][idx] = product_name[idx]
            _stocks['product_weight'][idx] = product_weight[idx]
            _stocks['in_stock'][idx] = in_stock[idx]
            _stocks['sold_items'][idx] = sold_items[idx]
            _stocks['order_date'][idx] = order_date[idx]
            _stocks['last_purchase'][idx] = last_purchase[idx]

            idx += 1

        return _stocks
Example #8
from collections import OrderedDict

# sort by key
a = {'cat': 5, 'dog': 2, 'mouse': 4}
new_a = OrderedDict(sorted(a.items(), key=lambda x: x[0]))
print(new_a)
# OrderedDict([('cat', 5), ('dog', 2), ('mouse', 4)])

# sort by value
b = {'cat': 5, 'dog': 2, 'mouse': 4}
new_b = OrderedDict(sorted(b.items(), key=lambda x: x[1]))
print(new_b)
# OrderedDict([('dog', 2), ('mouse', 4), ('cat', 5)])

# change the order within the sorted dictionary
new_b.move_to_end('mouse')
print(new_b)
# OrderedDict([('dog', 2), ('cat', 5), ('mouse', 4)])

new_b.move_to_end('mouse', last=False)
print(new_b)
# OrderedDict([('mouse', 4), ('dog', 2), ('cat', 5)])

new_b.popitem()
print(new_b)
# OrderedDict([('mouse', 4), ('dog', 2)])

new_b.popitem(last=False)
print(new_b)
# OrderedDict([('dog', 2)])
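Worth noting: plain dicts also preserve insertion order on Python 3.7+, so the two sorts above work with dict too; move_to_end() and popitem(last=False), however, remain OrderedDict-only:

plain = dict(sorted(b.items(), key=lambda x: x[1]))
print(plain)  # {'dog': 2, 'mouse': 4, 'cat': 5}
# plain.move_to_end('mouse')  # AttributeError: plain dict has no move_to_end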
Example #9
def import_model_sbml(sbml_model: Union[str, 'libsbml.Model'],
                      condition_table: Optional[Union[str,
                                                      pd.DataFrame]] = None,
                      observable_table: Optional[Union[str,
                                                       pd.DataFrame]] = None,
                      measurement_table: Optional[Union[str,
                                                        pd.DataFrame]] = None,
                      model_name: Optional[str] = None,
                      model_output_dir: Optional[str] = None,
                      verbose: Optional[Union[bool, int]] = True,
                      allow_reinit_fixpar_initcond: bool = True,
                      **kwargs) -> None:
    """
    Create AMICI model from PEtab problem

    :param sbml_model:
        PEtab SBML model or SBML file name.

    :param condition_table:
        PEtab condition table. If provided, parameters from there will be
        turned into AMICI constant parameters (i.e. parameters w.r.t. which
        no sensitivities will be computed).

    :param observable_table:
        PEtab observable table.

    :param measurement_table:
        PEtab measurement table.

    :param model_name:
        Name of the generated model. If model file name was provided,
        this defaults to the file name without extension, otherwise
        the SBML model ID will be used.

    :param model_output_dir:
        Directory to write the model code to. Will be created if it doesn't
        exist. Defaults to the current directory.

    :param verbose:
        Print/log extra information.

    :param allow_reinit_fixpar_initcond:
        See :class:`amici.ode_export.ODEExporter`. Must be enabled if initial
        states are to be reset after preequilibration.

    :param kwargs:
        Additional keyword arguments to be passed to
        :meth:`amici.sbml_import.SbmlImporter.sbml2amici`.
    """

    set_log_level(logger, verbose)

    logger.info(f"Importing model ...")

    # Get PEtab tables
    observable_df = petab.get_observable_df(observable_table)
    # to determine fixed parameters
    condition_df = petab.get_condition_df(condition_table)

    if observable_df is None:
        raise NotImplementedError("PEtab import without observables table "
                                  "is currently not supported.")

    # Model name from SBML ID or filename
    if model_name is None:
        if isinstance(sbml_model, libsbml.Model):
            model_name = sbml_model.getId()
        else:
            model_name = os.path.splitext(os.path.split(sbml_model)[-1])[0]

    if model_output_dir is None:
        model_output_dir = os.path.join(os.getcwd(), model_name)

    logger.info(f"Model name is '{model_name}'.\n"
                f"Writing model code to '{model_output_dir}'.")

    # Load model
    if isinstance(sbml_model, str):
        # from file
        sbml_reader = libsbml.SBMLReader()
        sbml_doc = sbml_reader.readSBMLFromFile(sbml_model)
        sbml_model = sbml_doc.getModel()
    else:
        # Create a copy, because it will be modified by SbmlImporter
        sbml_doc = sbml_model.getSBMLDocument().clone()
        sbml_model = sbml_doc.getModel()

    show_model_info(sbml_model)

    sbml_importer = amici.SbmlImporter(sbml_model)
    sbml_model = sbml_importer.sbml

    if observable_df is not None:
        observables, noise_distrs, sigmas = \
            get_observation_model(observable_df)

    logger.info(f'Observables: {len(observables)}')
    logger.info(f'Sigmas: {len(sigmas)}')

    if len(sigmas) != len(observables):
        raise AssertionError(
            f'Number of provided observables ({len(observables)}) and sigmas '
            f'({len(sigmas)}) do not match.')

    # TODO: adding extra output parameters is currently not supported,
    #  so we add any output parameters to the SBML model.
    #  this should be changed to something more elegant
    # <BeginWorkAround>
    formulas = chain((val['formula'] for val in observables.values()),
                     sigmas.values())
    output_parameters = OrderedDict()
    for formula in formulas:
        # we want reproducible parameter ordering upon repeated import
        free_syms = sorted(sp.sympify(formula).free_symbols,
                           key=lambda symbol: symbol.name)
        for free_sym in free_syms:
            sym = str(free_sym)
            if sbml_model.getElementBySId(sym) is None and sym != 'time':
                output_parameters[sym] = None
    logger.debug(f"Adding output parameters to model: {output_parameters}")
    for par in output_parameters.keys():
        petab.add_global_parameter(sbml_model, par)
    # <EndWorkAround>

    # TODO: to parameterize initial states or compartment sizes, we currently
    #  need initial assignments. if they occur in the condition table, we
    #  create a new parameter initial_${startOrCompartmentID}.
    #  feels dirty and should be changed (see also #924)
    # <BeginWorkAround>
    initial_states = [
        col for col in condition_df if sbml_model.getSpecies(col) is not None
    ]
    initial_sizes = [
        col for col in condition_df
        if sbml_model.getCompartment(col) is not None
    ]
    fixed_parameters = []
    if len(initial_states) or len(initial_sizes):
        # add preequilibration indicator variable
        # NOTE: would only be required if we actually have preequilibration
        #  adding it anyways. can be optimized-out later
        if sbml_model.getParameter(PREEQ_INDICATOR_ID) is not None:
            raise AssertionError("Model already has a parameter with ID "
                                 f"{PREEQ_INDICATOR_ID}. Cannot handle "
                                 "species and compartments in condition table "
                                 "then.")
        indicator = sbml_model.createParameter()
        indicator.setId(PREEQ_INDICATOR_ID)
        indicator.setName(PREEQ_INDICATOR_ID)
        # Can only reset parameters after preequilibration if they are fixed.
        fixed_parameters.append(PREEQ_INDICATOR_ID)

    for assignee_id in initial_sizes + initial_states:
        init_par_id_preeq = f"initial_{assignee_id}_preeq"
        init_par_id_sim = f"initial_{assignee_id}_sim"
        for init_par_id in [init_par_id_preeq, init_par_id_sim]:
            if sbml_model.getElementBySId(init_par_id) is not None:
                raise ValueError(
                    "Cannot create parameter for initial assignment "
                    f"for {assignee_id} because an entity named "
                    f"{init_par_id} exists already in the model.")
            init_par = sbml_model.createParameter()
            init_par.setId(init_par_id)
            init_par.setName(init_par_id)
        assignment = sbml_model.getInitialAssignment(assignee_id)
        if assignment is None:
            assignment = sbml_model.createInitialAssignment()
            assignment.setSymbol(assignee_id)
        formula = f'{PREEQ_INDICATOR_ID} * {init_par_id_preeq} ' \
                  f'+ (1 - {PREEQ_INDICATOR_ID}) * {init_par_id_sim}'
        math_ast = libsbml.parseL3Formula(formula)
        assignment.setMath(math_ast)
    # <EndWorkAround>

    fixed_parameters.extend(
        get_fixed_parameters(sbml_model=sbml_model, condition_df=condition_df))

    logger.debug(f"Fixed parameters are {fixed_parameters}")
    logger.info(f"Overall fixed parameters: {len(fixed_parameters)}")
    logger.info(
        "Variable parameters: " +
        str(len(sbml_model.getListOfParameters()) - len(fixed_parameters)))

    # Create Python module from SBML model
    sbml_importer.sbml2amici(
        model_name=model_name,
        output_dir=model_output_dir,
        observables=observables,
        constant_parameters=fixed_parameters,
        sigmas=sigmas,
        allow_reinit_fixpar_initcond=allow_reinit_fixpar_initcond,
        noise_distributions=noise_distrs,
        verbose=verbose,
        **kwargs)
Example #10
'''
Created on 10-Apr-2019

@author: Sumedh.Tambe
'''
from collections import OrderedDict


def Access(key, LRUDict):
    # The most recently used key is kept at the front (last=False), so
    # popitem(), which removes the tail entry, evicts the least recently used.
    if len(LRUDict) == 5:
        LRUDict.popitem()

    if key in LRUDict:
        LRUDict.move_to_end(key, last=False)
    else:
        LRUDict[key] = ''
        LRUDict.move_to_end(key, last=False)
    print(LRUDict)
    return list(LRUDict)[0]  # the most recently used key


if __name__ == '__main__':
    LRUDict = OrderedDict()
    print(Access(20, LRUDict))
    print(Access(30, LRUDict))
    print(Access(40, LRUDict))
    print(Access(50, LRUDict))
    print(Access(60, LRUDict))
    print(Access(70, LRUDict))
    print(Access(30, LRUDict))
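Access() keeps the most recent key at the front and evicts from the tail, but it evicts even when the incoming key is already cached. A sketch of the more common arrangement, which keeps the most recent key at the end and evicts only on a miss (lru_access is an invented name):

from collections import OrderedDict

def lru_access(key, cache, capacity=5):
    if key in cache:
        cache.move_to_end(key)           # mark as most recently used
    else:
        if len(cache) >= capacity:
            cache.popitem(last=False)    # evict the least recently used
        cache[key] = ''
    return next(reversed(cache))         # the most recently used key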
Example #11
spreadsheet_map_2018 = OrderedDict({
    'FIPSCode': 'FIPSCodeDetailed',
    'Jurisdiction_Name': 'Jurisdiction',
    'State_Full': f'{year}State_Full',
    'State_Abbr': f'{year}State_Abbr',
    'C1a': f'{year}ByMailCountBallotsSent',
    'C1b': f'{year}ByMailCountBallotsReturned',
    'C1c': f'{year}ByMailRejUndeliverable',
    'C1d': f'{year}ByMailRejVoided',
    'C1e': f'{year}ByMailRejVotedInPerson',
    'C2a': f'{year}ByMailCountPermanentByMailTransmitted',
    'C3a': f'{year}ByMailCountCounted',
    'C4a': f'{year}ByMailCountByMailRejected',
    'C4b': f'{year}ByMailRejDeadline',
    'C4c': f'{year}ByMailRejSignatureMissing',
    'C4d': f'{year}ByMailRejWitnessSignature',
    'C4e': f'{year}ByMailRejNonMatchingSig',
    'C4f': f'{year}ByMailRejNoElectionOfficialSig',
    'C4g': f'{year}ByMailRejUnofficialEnvelope',
    'C4h': f'{year}ByMailRejBallotMissing',
    'C4i': f'{year}ByMailRejEnvelopeNotSealed',
    'C4j': f'{year}ByMailRejNoAddr',
    'C4k': f'{year}ByMailRejMultipleBallots',
    'C4l': f'{year}ByMailRejDeceased',
    'C4m': f'{year}ByMailRejAlreadyVoted',
    'C4n': f'{year}ByMailRejNoVoterId',
    'C4o': f'{year}ByMailRejNoBallotApplication',
    'C4p_Other': f'{year}ByMailRejOtherReason1',
    'C4p': f'{year}ByMailRejOtherReasonCount1',
    'C4q_Other': f'{year}ByMailRejOtherReason2',
    'C4q': f'{year}ByMailRejOtherReason2Count',
    'C4r_Other': f'{year}ByMailRejOtherReason3',
    'C4r': f'{year}ByMailRejOtherReason3Count',
    'D1a': f'{year}TotalCountVotesCast',
    'D2a': f'{year}TotalCountVotedAtPoll',
    'D2b': f'{year}TotalCountVotedEarlyPhysical',
    'D2Comments': f'{year}CommentsEarlyVotingPhysical',
    'D3a': f'{year}OperationsNumOfPrecincts',
    'D4a': f'{year}OperationsNumPollingPlacesElectDay',
    'D5a': f'{year}OperationsNumEarlyVotingPlaces',
    'D6': f'{year}OperationsNumPollWorkersElectDay',
    'D7': f'{year}OperationsNumPollWorkersEarlyVoting',
    'D6_D7Comments': f'{year}OperationsPollingStationComments',
    'D8a': f'{year}OperationsNumPollWorkers',
    'D8b': f'{year}OperationsPWUnder18',
    'D8c': f'{year}OperationsPW18_25',
    'D8d': f'{year}OperationsPW26_40',
    'D8e': f'{year}OperationsPW41_60',
    'D8f': f'{year}OperationsPW61_70',
    'D8g': f'{year}OperationsPW71Plus',
    'D8Comments': f'{year}OperationsPWComments',
    'D9': f'{year}OperationsPWRecruitingDifficulties',
    'E1a': f'{year}ProvisionalCountTotal',
    'E1b': f'{year}ProvisionalCountCountedFully',
    'E1c': f'{year}ProvisionalCountCountedPartially',
    'E1d': f'{year}ProvisionalCountRejected',
    'E1e_Other': f'{year}ProvisionalCountRejOther',
    'E1Comments': f'{year}CommentsProvisional',
    'E2a': f'{year}ProvisionalRejCountTotal',
    'E2b': f'{year}ProvisionalRejProvisionalNotRegistered',
    'E2c': f'{year}ProvisionalRejWrongJurisdiction',
    'E2d': f'{year}ProvisionalRejWrongPrecinct',
    'E2e': f'{year}ProvisionalRejNoID',
    'E2f': f'{year}ProvisionalRejIncomplete',
    'E2g': f'{year}ProvisionalRejBallotMissing',
    'E2h': f'{year}ProvisionalRejNoSig',
    'E2i': f'{year}ProvisionalRejSigNotMatching',
    'E2j': f'{year}ProvisionalRejAlreadyVoted',
    'E2k_Other': f'{year}ProvisionalRejOther1Txt',
    'E2k': f'{year}ProvisionalRej1Count',
    'E2l_Other': f'{year}ProvisionalRejOther2Txt',
    'E2l': f'{year}ProvisionalRejOther2Count',
    'E2m_Other': f'{year}ProvisionalRejOther3Txt',
    'E2Comments': f'{year}RejProvisionalOther3Count',
    'F1a': f'{year}TotalVoteCounted',
    'F1b': f'{year}TotalVotedPhysically',
    'F1c': f'{year}TotalVotedAbroad',
    'F1d': f'{year}TotalVoteByMail',
    'F1e': f'{year}TotalVoteProvisionalBallot',
    'F1f': f'{year}TotalVoteInPersonEarly',
    'F1g': f'{year}TotalVoteByMailOnlyJurisdiction',
    'F1h_Other': f'{year}TotalVoteOtherTxt',
    'F1h': f'{year}TotalVoteOtherCount'
})
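The f-string values here are evaluated when the dict literal is built, so a year variable must already be in scope at that point; a quick sketch:

year = '2018'
print(f'{year}ByMailCountBallotsSent')  # '2018ByMailCountBallotsSent'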
Example #12
from data_proc.inception_crop import InceptionCrop
import albumentations as A
# https://github.com/fabioperez/skin-data-augmentation/blob/master/train.py

aug_parameters = OrderedDict({
    'affine': {
        'rotation': 90,
        'shear': 20,
        'scale': [0.8, 1.2]
    },
    'hflip': True,
    'vflip': True,
    'rotate': True,
    'color_trans': {
        'brightness': (0.7, 1.3),
        'contrast': (0.7, 1.3),
        'saturation': (0.7, 1.3),
        'hue': (-0.1, 0.1)
    },
    'normalization': {
        'mean': (0.485, 0.456, 0.406),
        'std': (0.229, 0.224, 0.225)
    },
    'size': 320,
    'scale': (0.8, 1.2),
    'ratio': (0.8, 1.2)
})


class Random_rotate:
    def __init__(self, p=0.5):
Example #13
    def __init__(self, dbstate, user, options_class, name, callback=None):
        uistate = user.uistate

        self.title = _('Find database loop')
        ManagedWindow.__init__(self, uistate, [], self.__class__)
        self.dbstate = dbstate
        self.uistate = uistate
        #self.db = CacheProxyDb(dbstate.db)
        self.db = dbstate.db

        top_dialog = Glade()

        top_dialog.connect_signals({
            "destroy_passed_object": self.close,
            "on_help_clicked": self.on_help_clicked,
            "on_delete_event": self.close,
        })

        window = top_dialog.toplevel
        title = top_dialog.get_object("title")
        self.set_window(window, title, self.title)

        # start the progress indicator
        self.progress = ProgressMeter(self.title,
                                      _('Starting'),
                                      parent=uistate.window)
        self.progress.set_pass(_('Looking for possible loop for each person'),
                               self.db.get_number_of_people())

        self.model = Gtk.ListStore(
            GObject.TYPE_STRING,  # 0==father id
            GObject.TYPE_STRING,  # 1==father
            GObject.TYPE_STRING,  # 2==son id
            GObject.TYPE_STRING,  # 3==son
            GObject.TYPE_STRING,  # 4==family gid
            GObject.TYPE_STRING)  # 5==loop number
        self.model.set_sort_column_id(
            Gtk.TREE_SORTABLE_UNSORTED_SORT_COLUMN_ID, 0)

        self.treeview = top_dialog.get_object("treeview")
        self.treeview.set_model(self.model)
        col0 = Gtk.TreeViewColumn('', Gtk.CellRendererText(), text=5)
        col1 = Gtk.TreeViewColumn(_('Gramps ID'),
                                  Gtk.CellRendererText(),
                                  text=0)
        col2 = Gtk.TreeViewColumn(_('Parent'), Gtk.CellRendererText(), text=1)
        col3 = Gtk.TreeViewColumn(_('Gramps ID'),
                                  Gtk.CellRendererText(),
                                  text=2)
        col4 = Gtk.TreeViewColumn(_('Child'), Gtk.CellRendererText(), text=3)
        col5 = Gtk.TreeViewColumn(_('Family ID'),
                                  Gtk.CellRendererText(),
                                  text=4)
        col1.set_resizable(True)
        col2.set_resizable(True)
        col3.set_resizable(True)
        col4.set_resizable(True)
        col5.set_resizable(True)
        col1.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        col2.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        col3.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        col4.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        col5.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        self.treeview.append_column(col0)
        self.treeview.append_column(col1)
        self.treeview.append_column(col2)
        self.treeview.append_column(col3)
        self.treeview.append_column(col4)
        self.treeview.append_column(col5)
        self.treeselection = self.treeview.get_selection()
        self.treeview.connect('row-activated', self.rowactivated_cb)

        self.curr_fam = None
        people = self.db.get_person_handles()
        self.total = len(people)  # total number of people to process.
        self.count = 0  # current number of people completely processed
        self.loop = 0  # Number of loops found for GUI

        pset = OrderedDict()
        # pset is the handle list of persons from the current start of
        # exploration path to the current limit.  The use of OrderedDict
        # allows us to use it as a LIFO during recursion, as well as makes for
        # quick lookup.  If we find a loop, pset provides a nice way to get
        # the loop path.
        self.done = set()
        # self.done is the handle set of people that have been fully explored
        # and do NOT have loops in the descendant tree.  We use this to avoid
        # repeating work when we encounter one of these during the search.
        for person_handle in people:
            person = self.db.get_person_from_handle(person_handle)
            self.current = person
            self.parent = None
            self.descendants(person_handle, pset)

        # close the progress bar
        self.progress.close()

        self.show()
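The pset comment above is the OrderedDict trick worth remembering: insertion order plus O(1) membership lets it double as a stack with fast lookup. A minimal sketch:

from collections import OrderedDict

path = OrderedDict()
path['grandparent'] = True             # push
path['parent'] = True                  # push
print('parent' in path)                # O(1) cycle check -> True
handle, _ = path.popitem(last=True)    # pop the most recent -> 'parent'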
Example #14
File: smf.py Project: rn5l/rsc18
    def descent(self, loss, param_list, fullparam_list, subparam_list, idx,
                learning_rate=1.0, epsilon=1e-6, momentum=0.0):
        updates = OrderedDict()
        getattr(self, self.learn)(updates, loss, param_list, learning_rate,
                                  momentum=self.momentum)
        getattr(self, self.learn + '_sub')(updates, loss, fullparam_list,
                                           subparam_list, idx, learning_rate,
                                           momentum=self.momentum)
        return updates
Example #15
from collections import OrderedDict

favorite_languages = OrderedDict()

favorite_languages["jen"] = "python"
favorite_languages["sarah"] = "c"
favorite_languages["edward"] = "ruby"
favorite_languages["phil"] = "python"

# favorite_languages = {
#     'jen':"python",
#     'sarah':'c',
#     'edward':'ruby',
#     'phil':'python',}

for name, language in favorite_languages.items():
    print(name.title() + "'s favorite language is " +
          language.title() + ".")
Example #16
File: clicker.py Project: StV2/game_test
    for key in objects:
        for action in objects[key].actions:
            try:
                action()
            except TypeError:
                action(EVENTS)


def draw_objects():
    for key in objects:
        obj_surf.blit(objects[key].sprite, (objects[key].x, objects[key].y))

    display.blit(obj_surf, (0, 0))


objects = OrderedDict()
objects["background"] = Obj()
objects["cookie"] = InteractiveObj()

sprites = {
    "background": pygame.image.load("resources/background.jpg"),
    "cookie": pygame.image.load("resources/cookie.gif")
}

sprites["cookie"].set_colorkey((255, 255, 255))
sprites["cookie"] = pygame.transform.scale(sprites["cookie"], (200, 200))
sprites["background"] = pygame.transform.scale2x(sprites["background"])

objects["cookie"].sprite = sprites["cookie"]
objects["cookie"].x = int(WIN_WIDTH / 2)
objects["cookie"].y = int(WIN_HEIGHT / 2)
Example #17
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict

states = list(map(lambda x: round(x, 3), np.arange(0, 1.002, 0.002)))
controls = list(map(lambda x: round(x, 3), np.arange(-0.4, 0.402, 0.002)))
new_st = []
state_map = OrderedDict()  # renamed from `dict` to avoid shadowing the built-in
jj = {}


def opt_con_cst():

    for i in states:
        state_map[i] = {}
        for j in controls:
            a = i - (0.4 * i * i) + j
            if 0 <= a <= 1:
                state_map[i][j] = round(a, 5)


#
def optcost_st(step):
    a = step - 1
    print(a)
    jj[a] = {}
    if step == 20:
        for i in states:
            cost_cont = []
            for u, x_next in state_map[i].items():
                cost = 4 * x_next + abs(u)
Example #18
from collections import OrderedDict

actor_dict = {'Atif':[300,1], 'Abul':[300,2],'Ashraf':[300,3],'Anas':[300,4]}

print(type(actor_dict))
print(actor_dict)

for i in actor_dict:
    print(i)

print('\n')

for i in OrderedDict(actor_dict):
    print(i)


d_sorted = {k: v for k, v in sorted(actor_dict.items(), key=lambda x: x[1])}
print(d_sorted)

import os

work_dir = os.getcwd()  # renamed from `dir` to avoid shadowing the built-in
file_name = os.path.join(work_dir, 'dict_key.txt')
for i in actor_dict:
    # A context manager closes the file handle on each iteration.
    with open(file_name, 'a') as f:
        f.write(i)
Example #19
    def build_compute_landscape(self, world_map):
        '''
        # Using the world_map.json config file, build 
        # a dict self.gpu_landscape like this:
        #
        #    {'machine_name1' : {'start_rank'    : <int>,
        #                        'num_gpus'      : <int>,
        #                        'gpu_device_ids': [<int>,<int>,...]
        #    {'machine_name2' : {'start_rank'    : <int>,
        #                        'num_gpus'      : <int>,
        #                        'gpu_device_ids': [<int>,<int>,...]
        #    } 
        #
        # Also sets 
        #     o self.master_hostname, the hostname
        #       running the one process that coordinates all others.
        #     o self.WORLD_SIZE, number of GPUs used across all machines
        #     o self.my_gpus, the number of GPUs on this machine
        
        :param world_map:
        :type world_map:
        :return: information about how many GPUs are
            on each node
        :rtype: OrderedDict
        '''

        if self.hostname not in world_map:
            raise ConfigError(
                f"World map does not contain an entry for this machine {self.hostname}"
            )

        # Go through the world map, machine (a.k.a. node)
        # one at a time, in alpha order of the machine
        # names to ensure all copies of this script
        # come to the same conclusions about ranks

        # Build gpu_landscape:
        #
        #    {'machine_name1' : {'start_rank'    : <int>,
        #                        'num_gpus'      : <int>,
        #                        'gpu_device_ids': [<int>,<int>,...]
        #    {'machine_name2' : {'start_rank'    : <int>,
        #                        'num_gpus'      : <int>,
        #                        'gpu_device_ids': [<int>,<int>,...]
        #    }
        #
        # The structure is an OrderedDict(), containing
        # machines alphabetically by name. This discipline
        # is required so that all copies of this launch script
        # (one copy per machine) arrive at the same ordering of
        # GPUs:

        gpu_landscape = OrderedDict({})

        machine_name = self.hostname
        machine_info = world_map[self.hostname]

        try:
            machine_gpus = machine_info['gpus']
        except KeyError:
            # Without this entry, machine_gpus would be unbound below.
            raise ConfigError("World map must include a 'gpus' entry; the value may be 0")

        gpu_landscape[machine_name] = {}
        gpu_landscape[machine_name]['num_gpus'] = machine_gpus

        # List of GPU numbers to use is optional
        # in world_maps:

        machine_gpus_to_use = machine_info.get('devices', None)

        if machine_gpus_to_use is None:
            # Use all GPUs on this machine:
            machine_gpus_to_use = list(range(machine_gpus))

        gpu_landscape[machine_name]['gpu_device_ids'] = machine_gpus_to_use

        # If this machine has no GPUs, still add 1 process for it,
        # which will run on its CPU:
        self.WORLD_SIZE += machine_gpus if machine_gpus > 0 else 1

        self.my_gpus = gpu_landscape[self.hostname]['num_gpus']
        self.gpu_landscape = gpu_landscape
        return gpu_landscape
Example #20
OrderedDict([('AM01_20190711_170000_sw-start0_wcpa',
              {'species': 'wcpa',
               'start_time': 0.0,
               'end_time': 5.944272445820434}),
             ('AM01_20190711_170000_sw-start2_wcpa',
              {'species': 'wcpa',
               'start_time': 1.996904024767802,
               'end_time': 7.9411764705882355}),
             ('AM01_20190711_170000_sw-start4_wcpa',
              {'species': 'wcpa',
               'start_time': 3.993808049535604,
               'end_time': 9.938080495356038}),
             ('AM01_20190711_170000_sw-start6',
              {'species': 'noise',
               'start_time': 5.9907120743034055,
               'end_time': 11.93498452012384}),
             ('AM01_20190711_170000_sw-start8',
              {'species': 'noise',
               'start_time': 7.987616099071208,
               'end_time': 13.931888544891642}),
             ('AM01_20190711_170000_sw-start10',
              {'species': 'noise',
               'start_time': 9.98452012383901,
               'end_time': 15.928792569659443}),
             ('AM01_20190711_170000_sw-start12',
              {'species': 'noise',
               'start_time': 11.981424148606811,
               'end_time': 17.925696594427244}),
             ('AM01_20190711_170000_sw-start14',
              {'species': 'noise',
               'start_time': 13.978328173374614,
               'end_time': 19.922600619195048}),
             ('AM01_20190711_170000_sw-start16',
              {'species': 'noise',
               'start_time': 15.975232198142416,
               'end_time': 21.91950464396285}),
             ('AM01_20190711_170000_sw-start18',
              {'species': 'noise',
               'start_time': 17.97213622291022,
               'end_time': 23.916408668730654}),
             ('AM01_20190711_170000_sw-start20',
              {'species': 'noise',
               'start_time': 19.96904024767802,
               'end_time': 25.913312693498455}),
             ('AM01_20190711_170000_sw-start22_shwc',
              {'species': 'shwc',
               'start_time': 21.96594427244582,
               'end_time': 27.910216718266255}),
             ('AM01_20190711_170000_sw-start24_shwc',
              {'species': 'shwc',
               'start_time': 23.962848297213622,
               'end_time': 29.907120743034056}),
             ('AM01_20190711_170000_sw-start26_shwc',
              {'species': 'shwc',
               'start_time': 25.959752321981426,
               'end_time': 31.90402476780186}),
             ('AM01_20190711_170000_sw-start26_unk1',
              {'species': 'unk1',
               'start_time': 25.959752321981426,
               'end_time': 31.90402476780186}),
             ('AM01_20190711_170000_sw-start28',
              {'species': 'shwc',
               'start_time': 27.956656346749227,
               'end_time': 33.90092879256966}),
             ('AM01_20190711_170000_sw-start28_unk1',
              {'species': 'unk1',
               'start_time': 27.956656346749227,
               'end_time': 33.90092879256966}),
             ('AM01_20190711_170000_sw-start30_wcpa',
              {'species': 'wcpa',
               'start_time': 29.953560371517028,
               'end_time': 35.89783281733746}),
             ('AM01_20190711_170000_sw-start30_shwc',
              {'species': 'shwc',
               'start_time': 29.953560371517028,
               'end_time': 35.89783281733746}),
             ('AM01_20190711_170000_sw-start30_unk1',
              {'species': 'unk1',
               'start_time': 29.953560371517028,
               'end_time': 35.89783281733746}),
             ('AM01_20190711_170000_sw-start32',
              {'species': 'unk1',
               'start_time': 31.950464396284833,
               'end_time': 37.89473684210527}),
             ('AM01_20190711_170000_sw-start34',
              {'species': 'noise',
               'start_time': 33.94736842105263,
               'end_time': 39.89164086687306}),
             ('AM01_20190711_170000_sw-start36',
              {'species': 'noise',
               'start_time': 35.94427244582044,
               'end_time': 41.88854489164087}),
             ('AM01_20190711_170000_sw-start38',
              {'species': 'noise',
               'start_time': 37.94117647058824,
               'end_time': 43.88544891640867}),
             ('AM01_20190711_170000_sw-start40',
              {'species': 'noise',
               'start_time': 39.93808049535604,
               'end_time': 45.88235294117647}),
             ('AM01_20190711_170000_sw-start42',
              {'species': 'noise',
               'start_time': 41.93498452012384,
               'end_time': 47.87925696594427}),
             ('AM01_20190711_170000_sw-start44',
              {'species': 'noise',
               'start_time': 43.93188854489164,
               'end_time': 49.87616099071207}),
             ('AM01_20190711_170000_sw-start46',
              {'species': 'noise',
               'start_time': 45.92879256965944,
               'end_time': 51.873065015479874}),
             ('AM01_20190711_170000_sw-start48',
              {'species': 'noise',
               'start_time': 47.925696594427244,
               'end_time': 53.869969040247675}),
             ('AM01_20190711_170000_sw-start50',
              {'species': 'noise',
               'start_time': 49.92260061919505,
               'end_time': 55.86687306501548}),
             ('AM01_20190711_170000_sw-start52',
              {'species': 'noise',
               'start_time': 51.91950464396285,
               'end_time': 57.863777089783284}),
             ('AM01_20190711_170000_sw-start54',
              {'species': 'wcpa',
               'start_time': 53.916408668730654,
               'end_time': 59.860681114551085})])
Example #21
    def _asdict(self):
        'Return a new OrderedDict which maps field names to their values.'
        return OrderedDict(zip(self._fields, self))
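Given the implementation above (namedtuple's _asdict on the Python versions where it still returns an OrderedDict), a quick usage sketch:

from collections import namedtuple

Point = namedtuple('Point', ['x', 'y'])
p = Point(11, y=22)
print(p._asdict())  # OrderedDict([('x', 11), ('y', 22)])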
Example #22
File: test.py Project: simmmba/Masikdang
def show_store_categories_graph(dataframes, n=100):
    headers = {
        'user-agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Whale/2.7.98.19 Safari/537.36'
    }
    print(datetime.now())
    # create an empty list
    stores_crawling = []

    stores = dataframes["stores"]
    boolean_stores1 = stores['id'] > 447332
    stores = stores[boolean_stores1]
    print(len(stores))
    for i, store in stores.iterrows():
        if store['address']:
            address = store['address'].split(' ')
            if "서울" in address[0]:
                if len(address) > 2:
                    query = address[0] + " " + address[1] + " " + \
                        address[2] + " " + store['store_name']
                else:
                    query = address[0] + " " + address[1] + \
                        " " + store['store_name']
            else:
                continue
        else:
            query = store['store_name']

        response = requests.get(
            "https://www.diningcode.com/isearch.php?query=" + query,
            headers=headers)
        soup = BeautifulSoup(response.text, 'html.parser')
        print(str(i) + " >> " + query)

        while soup.select('#header > h1'):
            print("reload1: " + soup.select('#header > h1')[0].text)
            response = requests.get(
                "https://www.diningcode.com/isearch.php?query=" + query,
                headers=headers)
            soup = BeautifulSoup(response.text, 'html.parser')

        if soup.find("div", id="subright-cont"):
            for href in soup.find("div", id="subright-cont").find_all("li"):
                response = requests.get("https://www.diningcode.com" +
                                        href.find("a")["href"],
                                        headers=headers)
                soup = BeautifulSoup(response.text, 'html.parser')

                # print("https://www.diningcode.com" + href.find("a")["href"])

                while soup.select('#header > h1'):
                    print("reload2: " + soup.select('#header > h1')[0].text)
                    response = requests.get("https://www.diningcode.com" +
                                            href.find("a")["href"],
                                            headers=headers)
                    soup = BeautifulSoup(response.text, 'html.parser')
                try:
                    tel = soup.select(
                        '#div_profile > div.s-list.basic-info > ul > li.tel'
                    )[0].text
                    if store["tel"] != tel:
                        continue
                except IndexError:  # no phone number found on the page
                    continue

                store_temp = OrderedDict()
                store_temp["id"] = i
                store_temp["store_name"] = store["store_name"]
                store_temp["branch"] = store["branch"]
                store_temp["area"] = store["area"]
                store_temp["tel"] = store["tel"]
                store_temp["address"] = store["address"]
                store_temp["latitude"] = store["latitude"]
                store_temp["longitude"] = store["longitude"]

                if store["category_list"]:
                    store_temp["category_list"] = "|".join(
                        [c["category"] for c in store["category_list"]])
                else:
                    store_temp["category_list"] = None

                # add the image list
                store_img = None
                if soup.select(
                        '#div_profile > div.s-list.pic-grade > ul > li.bimg.btn-gallery-open > img'
                ):
                    store_img = soup.select_one(
                        '#div_profile > div.s-list.pic-grade > ul > li.bimg.btn-gallery-open > img'
                    )['src']
                store_temp["store_img"] = store_img

                # fetch the tags
                tag_list = []
                for tag in soup.select(".tag"):
                    tag_list = tag.text.replace(" ", "").strip().split(",")
                if tag_list[0] != "":
                    store_temp["tag_list"] = tag_list
                else:
                    store_temp["tag_list"] = []
                # print(store_temp["tag_list"])

                amenity_list = []
                for amenity in soup.select(".char"):
                    amenity_list = amenity.text.replace(" ",
                                                        "").strip().split(",")
                if amenity_list[0] != "":
                    store_temp["amenity_list"] = amenity_list
                else:
                    store_temp["amenity_list"] = []
                # print(store_temp["amenity_list"])

                # fetch the reviews
                review_list = []
                for review_item in soup.find_all("div", class_="latter-graph"):
                    review = OrderedDict()
                    review["write_name"] = review_item.find(
                        "p", class_="person-grade").find("strong").get_text()
                    review["date"] = review_item.find(
                        "i", class_="date").get_text()
                    # may be absent, so handle it with an if statement
                    review["content"] = review_item.find(
                        "p", class_="review_contents").get_text()

                    # fetch the scores
                    for score in review_item.find_all("p",
                                                      class_="point-detail"):
                        index = 1
                        for score_i in score.find_all("i", class_="star"):
                            if index == 1:
                                review["taste"] = score_i.get_text()
                            elif index == 2:
                                review["price"] = score_i.get_text()
                            elif index == 3:
                                review["service"] = score_i.get_text()
                            index = index + 1

                    # image list
                    review_img_list = []
                    for src in review_item.find_all(
                            "div", class_="btn-gallery-review"):
                        review_img_list.append(src.find("img")["src"])
                    review["img_list"] = review_img_list

                    # tag list
                    review_tag_list = []
                    for tag in review_item.select(".tags"):
                        review_tag_list = tag.text.replace(" ", "").split("\n")
                    tag = "|".join([r for r in review_tag_list]).strip("|")
                    review["tag_list"] = tag

                    review_list.append(review)

                store_temp["review_list"] = review_list

                with open('savedd.json', 'a', encoding="utf-8") as make_file:
                    make_file.write(",")
                    json.dump(store_temp,
                              make_file,
                              ensure_ascii=False,
                              indent="\t")

                stores_crawling.append(store_temp)

                break

    # print(json.dumps(stores_crawling, ensure_ascii=False, indent="\t"))
    print(datetime.now())
Example #23
 def test_sorted_answers_cardinality(self):
     """ We can sort answer with the sort_answer parameter. """
     alphanumeric = [("abé cé", 2), ("abë-cè", 1), ("dé", 2), ("dë", 1)]
     cardinal = [("abé cé", 2), ("dé", 2), ("abë-cè", 1), ("dë", 1)]
     user_defined = {"dé": 1, "abë-cè": 2, "dë": 3, "abé cé": 4}
     specific = [("dé", 2), ("abë-cè", 1), ("dë", 1), ("abé cé", 2)]
     assert_message = " sorting does not seem to work"
     self.assertEqual(self.sorted_card(group_by_letter_case=True),
                      OrderedDict(cardinal), "default" + assert_message)
     self.assertEqual(
         self.sorted_card(group_by_letter_case=True,
                          sort_answer="alphanumeric"),
         OrderedDict(alphanumeric),
         "alphanumeric" + assert_message,
     )
     self.assertEqual(
         self.sorted_card(group_by_letter_case=True,
                          sort_answer="cardinal"),
         OrderedDict(cardinal),
         "cardinal" + assert_message,
     )
     self.assertEqual(
         self.sorted_card(group_by_letter_case=True,
                          sort_answer=user_defined),
         OrderedDict(specific),
         "user defined" + assert_message,
     )
     other_question_assert_message = " when in relation with another question"
     self.assertEqual(
         self.sorted_card(group_by_letter_case=True,
                          other_question=self.questions[1]),
         OrderedDict([
             ("abé cé", {
                 "left blank": 2
             }),
             ("dé", {
                 "left blank": 2
             }),
             ("abë-cè", {
                 "left blank": 1
             }),
             ("dë", {
                 "left blank": 1
             }),
         ]),
         "default" + assert_message + other_question_assert_mesage,
     )
     self.assertEqual(
         self.sorted_card(group_by_letter_case=True,
                          sort_answer="alphanumeric",
                          other_question=self.questions[1]),
         OrderedDict([
             ("abé cé", {
                 "left blank": 2
             }),
             ("abë-cè", {
                 "left blank": 1
             }),
             ("dé", {
                 "left blank": 2
             }),
             ("dë", {
                 "left blank": 1
             }),
         ]),
         "alphanumeric" + assert_message + other_question_assert_mesage,
     )
     self.assertEqual(
         self.sorted_card(group_by_letter_case=True,
                          sort_answer="cardinal",
                          other_question=self.questions[1]),
         OrderedDict([
             ("abé cé", {
                 "left blank": 2
             }),
             ("dé", {
                 "left blank": 2
             }),
             ("abë-cè", {
                 "left blank": 1
             }),
             ("dë", {
                 "left blank": 1
             }),
         ]),
         "cardinal" + assert_message + other_question_assert_mesage,
     )
     self.assertEqual(
         self.sorted_card(group_by_letter_case=True,
                          sort_answer=user_defined,
                          other_question=self.questions[1]),
         OrderedDict([
             ("dé", {
                 "left blank": 2
             }),
             ("abë-cè", {
                 "left blank": 1
             }),
             ("dë", {
                 "left blank": 1
             }),
             ("abé cé", {
                 "left blank": 2
             }),
         ]),
         "user defined" + assert_message + other_question_assert_mesage,
     )
Example #24
def get_sim(java_list, project_name, master_commit_id):
    project_master_dir = python_root_path + "/git_project/" + project_name + "/" + project_name
    os.chdir(project_master_dir)
    os.system("git reset --hard {}".format(master_commit_id))
    sha_file_dict = OrderedDict()
    for line_list in java_list:
        commit_sha1 = line_list[0]
        sha_file_dict[commit_sha1] = []
        if len(line_list) <= 1:
            continue
        for file_item in line_list[1:]:
            sha_file_dict[commit_sha1].append(file_item[2])
    print(len(java_list))

    dist_dict = OrderedDict()
    file_dist = OrderedDict()
    index = 0
    total = len(sha_file_dict)
    for commit_sha1, file_list in sha_file_dict.items():
        index += 1
        if index % 100 == 0:
            print("\r{} / {}".format(index, total), end="")
        file_dist[commit_sha1] = []
        dist_dict[commit_sha1] = []
        if len(file_list) == 0:
            file_dist[commit_sha1].append(" u 0")
            dist_dict[commit_sha1].append(" 0 0")
            continue
        dist_sum = 0
        for file in file_list:
            try:
                # print(commit_sha1, file)
                status1 = os.system("git checkout --quiet {} -- {}".format(
                    commit_sha1, file))
                node_str1 = ""
                node_str2 = ""
                if status1 == 0:
                    node_str1 = ASTTest.file2nodes(file)
                # if not os.path.exists(file):
                #     print(commit_sha1, file, "not exist")
                status2 = os.system("git checkout --quiet {}~1 -- {}".format(
                    commit_sha1, file))
                if status2 == 0:
                    node_str2 = ASTTest.file2nodes(file)
                # os.system returns a nonzero, platform-encoded status on
                # failure, so compare against 0 rather than 1
                if status1 != 0 and status2 == 0:  # absent at commit, present in parent
                    print(file, "delete in commit", commit_sha1)
                    dist_sum += len(node_str2)
                    file_dist[commit_sha1].append(" - " + file + " " +
                                                  str(len(node_str2)))
                elif status1 == 0 and status2 != 0:  # present at commit, absent in parent
                    print(file, "add in commit", commit_sha1)
                    dist_sum += len(node_str1)
                    file_dist[commit_sha1].append("+ " + file + " " +
                                                  str(len(node_str1)))
                elif status1 == 0 and status2 == 0:
                    distance = ASTTest.get_node_sim(node_str1, node_str2)
                    dist_sum += distance
                    file_dist[commit_sha1].append(" d " + file + " " +
                                                  str(distance))
            except Exception:
                # parsing or diffing a file can fail for many reasons
                # (encoding, syntax errors); skip the file and move on
                pass
        dist_dict[commit_sha1].append(" " + str(dist_sum) + " " +
                                      str(len(file_list)))

    os.system("git reset --hard {}".format(master_commit_id))
    print(master_commit_id, "reset to master")
    index = -1
    total = len(file_dist)
    with open("../output/commit_diff.txt", "w", encoding="utf-8") as fw:
        for commit_sha1, v in file_dist.items():
            index += 1
            fw.write(
                str(total - index) + " " + commit_sha1 + " ".join(v) + "\n")
    index = -1
    total = len(file_dist)
    with open("../output/commit_dist.txt", "w", encoding="utf-8") as fw:
        for commit_sha1, v in dist_dict.items():
            index += 1
            fw.write(
                str(total - index) + " " + commit_sha1 + " " + " ".join(v) +
                "\n")
    os.chdir(python_root_path)
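
The java_list argument above is only read positionally: each row is
[commit_sha, file_item, ...] with the file path at file_item[2]. A minimal
sketch of how such a list might be built from git log --name-status output
(build_java_list is a hypothetical helper, not part of the original code):

import subprocess

def build_java_list(repo_dir):
    # One row per commit: [sha, (status, status, path), ...], padded so the
    # path sits at index 2, which is the only field get_sim reads.
    out = subprocess.run(
        ["git", "log", "--name-status", "--pretty=format:@%H"],
        cwd=repo_dir, capture_output=True, text=True).stdout
    java_list, row = [], None
    for line in out.splitlines():
        if line.startswith("@"):
            if row is not None:
                java_list.append(row)
            row = [line[1:]]
        elif line and row is not None:
            parts = line.split("\t")
            if parts[-1].endswith(".java"):
                row.append((parts[0], parts[0], parts[-1]))
    if row is not None:
        java_list.append(row)
    return java_list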
Example #25
    def __init__(self, port, controller):
        '''
        Builds the GUI, connects it to the server (thread). The GUI is just another client of
        the service.
        '''
        global guiClient
        guiClient = self
        self.logger = logging.getLogger(__name__)
        self.port = port
        self.controller = controller
        self.builder = Gtk.Builder()
        riaps_folder = os.getenv('RIAPSHOME', './')
        try:
            self.builder.add_from_file(join(riaps_folder, "etc/riaps-ctrl.glade"))  # GUI construction
        except RuntimeError:
            self.logger.error('Cannot find GUI configuration file')
            raise
        self.builder.connect_signals({"onDeleteWindow": self.on_Quit,
                                      "onConsoleEntryActivate": self.on_ConsoleEntry,
                                      "onSelectApplication": self.on_SelectApplication,
                                      "onSelectDeployment": self.on_SelectDeployment,
                                      "onFolderEntryActivate": self.on_folderEntryActivate,
                                      "onKill": self.on_Kill,
                                      "onClean": self.on_Clean,
                                      "onQuit": self.on_Quit,
                                      "onLoadApplication": self.on_LoadApplication,
                                      "onViewApplication": self.on_ViewApplication
                                      })

        self.conn = rpyc.connect(self.controller.hostAddress, port)  # Local connection to the service
        GLib.io_add_watch(self.conn, 1, GLib.IO_IN, self.bg_server)  # Register the callback with the service
        self.conn.root.login("*gui*", self.on_serverMessage)  # Log in to the service

        self.mainWindow = self.builder.get_object("window1")
        self.messages = self.builder.get_object("messageTextBuffer")
        self.consoleIn = self.builder.get_object("consoleEntryBuffer")
        self.appNameEntry = self.builder.get_object("appNameEntry")
        self.deplNameEntry = self.builder.get_object("deplNameEntry")
        self.folderEntry = self.builder.get_object("folderEntry")
        #self.launchButton = self.builder.get_object("launchButton")
        #self.launchButton.set_sensitive(False)
        #self.stopButton = self.builder.get_object("stopButton")
        #self.stopButton.set_sensitive(False)
        #self.removeButton = self.builder.get_object("removeButton")
        #self.removeButton.set_sensitive(False)
        self.appLaunched = False
        self.appDownLoaded = False

        '''
        Status Table Additions
        '''
        self.cellTextPlaceHolder = '                '
        self.column_cur_size = 12
        self.row_cur_size = 16
        self.appToLoad = None
        self.appSelected = None
        self.gridScrollWindow = self.builder.get_object('scrolledwindow2')
        self.gridTable = Gtk.Grid()
        self.gridScrollWindow.add_with_viewport(self.gridTable)
        self.nodeIDDict = OrderedDict()
        self.appStatusDict = OrderedDict()
        self.init_GridTable()

        self.mainWindow.show_all()
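
The GLib.io_add_watch call above registers bg_server as the I/O callback for
the rpyc connection. The real handler lives elsewhere in the riaps controller;
a minimal sketch of what such a callback typically looks like (an assumption,
not the actual implementation):

    def bg_server(self, source, condition):
        # Serve whatever requests/replies are pending on the rpyc
        # connection; returning True keeps the GLib watch registered.
        self.conn.poll_all()
        return True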
Example #26
    def compute_worker_assignments(cls, in_dir, num_workers=None):
        '''
        Given the root directory of a set of
        directories whose names are species,
        and which contain recordings by species,
        return a multiprocessing worker assignment.
        
        Expected:
                         in_dir

          Species1        Species2   ...     Speciesn
           smpl1_1.mp3      smpl2_1.mp3         smpln_1.mp3
           smpl1_2.mp3      smpl2_2.mp3         smpln_2.mp3
                            ...
        
        Collects number of recordings available for
        each species. Creates a list of species name
        buckets such that all workers asked to process
        one of the buckets, will have roughly equal
        amounts of work.
        
        Example return:
            
            [['Species1', 'Species2'], ['Species3', 'Species4', 'Species5']]
            
        The caller can then assign the first list to
        one worker, and the second list to another worker.
        
        The number of buckets, and therefore the number
        of eventual workers, may be passed in. If None, 
        80% of the cores available on the current machine
        will be assumed. If num_workers is provided and
        the number is larger than the number of available
        cores, the number is reduced to the number of cores.
        
        Also returned is the number of workers on which the
        computation is based. This number is always the same
        as the number of species name lists in the return.
        But for clarity, the number is returned explicitly.

        :param in_dir: root of species recordings
        :type in_dir: str
        :param num_workers: number of buckets into which to partition 
        :type num_workers: {int | None}
        :return: list of species name lists, and number of workers.
        :rtype: ([[str]], int)
        '''

        # Create:
        #     {species : num-recordings}
        #     {species : recordings_dir}
        #     [(species1, fpath1), (species1, fpath2), (species2, fpath3)...]  
        
        sample_size_distrib = OrderedDict()
        sample_dir_dict     = {}
        species_file_tuples = []
        
        for _dir_name, subdir_list, _file_list in os.walk(in_dir):
            for species_name in subdir_list:
                species_recordings_dir = os.path.join(in_dir, species_name)
                rec_paths = os.listdir(species_recordings_dir)
                sample_size_distrib[species_name] = len(rec_paths)
                sample_dir_dict[species_name] = species_recordings_dir
                species_file_pairs = list(zip([species_name]*len(rec_paths), rec_paths))
                species_file_tuples.extend(species_file_pairs)
            break  # only the top level of in_dir is walked
        
        num_cores = mp.cpu_count()
        # Use 80% of the cores:
        if num_workers is None:
            num_workers = round(num_cores * SpectrogramChopper.MAX_PERC_OF_CORES_TO_USE  / 100)
        elif num_workers > num_cores:
            # Limit pool size to number of cores:
            num_workers = num_cores

        # Create a partitioning into equal sized files,
        # regardless of species association.
        
        assignments = cls.partition_by_recordings(species_file_tuples,
                                                  num_workers)
        num_workers_used = len(assignments)
        return assignments, num_workers_used
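
A usage sketch (process_species is a hypothetical per-bucket worker; the
directory path is illustrative):

import multiprocessing as mp

assignments, num_workers = SpectrogramChopper.compute_worker_assignments(
    '/data/recordings', num_workers=4)
with mp.Pool(num_workers) as pool:
    # One task per bucket; each worker handles its whole species list.
    pool.map(process_species, assignments)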
Example #27
def read_and_process(data_form, book_number, book_se):
    discount = 0.18
    gross_profit = {}
    profit = {}
    total_expense = {}
    price = {}  # list price
    books_print_expense = {}  # printing cost
    print_data_group = {}  # per-book print data tables

    printed = {}  # copies printed
    sold = {}  # copies sold

    books_logistics_expense = {}  # warehouse cost
    logistics_data_group = {}  # per-book warehouse data tables
    color_map = {'单色': 's', '双色': 'd', '四色': 'f'}  # monochrome / two-color / four-color

    for i in range(1, book_number + 1):  # build the data tables for each book
        book_name = 'b' + str(book_se) + '-' + str(i)
        press_name = book_name + 'Press'
        sell_name = book_name + 'Sell'
        df_press = pd.read_excel(data_form, sheet_name=press_name)
        df_sell = pd.read_excel(data_form, sheet_name=sell_name)

        press_preprocess(df_press)
        times = OrderedDict({x[-1]: True for x in df_press.values})
        print_data_group[book_name] = {}
        logistics_data_group[book_name] = {}
        price[book_name] = {}
        sold[book_name] = {}

        j = 0
        for index in times:
            print_data_group[book_name][index] = []
            logistics_data_group[book_name][index] = list(df_sell.values[j])
            sold[book_name][index] = df_sell.values[j][-1]
            j += 1
        for press in df_press.values:
            print_data_group[book_name][press[-1]].append(list(press))
            price[book_name][press[-1]] = press[2]

    for book_name, each_group in print_data_group.items():  # compute printing costs
        expense = {}
        printed[book_name] = {}
        for key, group in each_group.items():
            # print(group)
            print_num = sum([each[1] for each in group])
            print_page = group[0][3]
            color = color_map[group[0][-2]]

            printed[book_name][key] = print_num
            expense[key] = print_expense(print_num, print_page, color)
        books_print_expense[book_name] = expense
    # print('printing costs', books_print_expense)

    for book_name, book_no in logistics_data_group.items():  # compute warehouse costs
        expense = {}
        for key, value in book_no.items():
            sub_expense = 0  # reset per printing so costs do not carry over
            for month in range(1, 13):
                if not isnan(value[month]) and value[month] > 0:
                    sub_expense += value[month] * 0.0273 * price[book_name][key]
            expense[key] = sub_expense
        books_logistics_expense[book_name] = expense
    # print('warehouse costs', books_logistics_expense)

    dict_operation(books_print_expense, books_logistics_expense, total_expense,
                   '+')  # compute total cost
    # print('total cost', total_expense)

    # print('list price', price)
    # print('sold', sold)
    # print('printed', printed)
    # 5. profit = list price × print run × sales discount × sell-through rate
    #    - (printing cost + warehouse shipping cost)
    sell_rate = {}  # sell-through rate
    dict_operation(price, printed, gross_profit, '*')
    dict_operation(sold, printed, sell_rate, '/')
    dict_operation(gross_profit, sell_rate, gross_profit, '*')
    dict_operation(gross_profit, (1 - discount), gross_profit, '*')
    # print('gross profit', gross_profit)

    dict_operation(gross_profit, total_expense, profit, '-')
    # print('profit', profit)
    return {
        'sold': sold,
        'pressed': printed,
        'print_expense': books_print_expense,
        'logistics_expense': books_logistics_expense,
        'profit': profit
    }
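
read_and_process relies on a dict_operation helper defined elsewhere. A
minimal sketch of what it presumably does, inferred from the call sites above
(an assumption, not the original implementation): apply an arithmetic operator
element-wise over the two-level {book: {printing: value}} dicts, where the
right operand may also be a plain number (e.g. 1 - discount):

import operator

_OPS = {'+': operator.add, '-': operator.sub,
        '*': operator.mul, '/': operator.truediv}

def dict_operation(left, right, result, op):
    # Apply op element-wise; `right` is either a scalar or a nested dict
    # with the same {book: {printing: value}} shape as `left`.
    fn = _OPS[op]
    scalar = isinstance(right, (int, float))
    for book, printings in left.items():
        result[book] = {}
        for key, value in printings.items():
            other = right if scalar else right[book][key]
            result[book][key] = fn(value, other)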
Example #28
def maven_kp_to_tplot(filename=None,
                      input_time=None,
                      instruments=None,
                      insitu_only=False,
                      specified_files_only=False,
                      ancillary_only=False):
    '''
    Read a given in situ KP file into a dictionary object.
    Optional keywords may be used to downselect the instruments returned
    and the time window.

    Input:
        filename:
            Name of the in situ KP file(s) to read in.
        input_time:
            Set a time bounds/filter on the data
            (this will be necessary when this is called by a wrapper that
             seeks to ingest all data within a range of dates that may
             span multiple days/files).
        instruments:
            Optional keyword listing the instruments to include
            in the returned dictionary/structure.
        insitu_only:
            Optional keyword specifying that only in situ files should be
            read in (IUVS files are skipped).
        specified_files_only:
            Optional keyword that allows you to specify you only want filenames
            given in 'filename' to be read in, not other files close in date/time
            as well.
        ancillary_only:
            Will only load in the spacecraft and APP info
    Output:
        A dictionary (data structure) containing some or all of the columns
            included in a MAVEN in situ Key Parameter data file.
    '''
    # Imports are local to the function; builtins, calendar, os, and numpy
    # are all referenced below, so pull them in here as well.
    import os
    import re
    import builtins
    import calendar
    import pandas as pd
    import numpy as np
    from datetime import datetime, timedelta
    from dateutil.parser import parse

    filenames = []
    iuvs_filenames = []

    if instruments is not None:
        if not isinstance(instruments, builtins.list):
            instruments = [instruments]

    if filename is None and input_time is None:
        print(
            'You must specify either a set of filenames to read in, or a time frame in which '
            'you want to search for downloaded files.')
        return  # cannot proceed without a file list or a time range

    if ancillary_only:
        instruments = ['SPACECRAFT']

    if filename is not None:
        if not isinstance(filename, builtins.list):
            filename = [filename]

        dates = []
        for file in filename:
            date = re.findall(r'_(\d{8})', file)[0]
            dates.append(date)
            if 'iuvs' in file:
                iuvs_filenames.append(file)
            else:
                filenames.append(file)
        dates.sort()

        # To keep the rest of the code consistent, if someone gave a file, or files, to load, but no input_time,
        # go ahead and create an 'input_time'
        if input_time is None:
            if len(dates) == 1:
                input_time = str(dates[0][:4]) + '-' + str(
                    dates[0][4:6]) + '-' + str(dates[0][6:])

            else:
                beg_date = min(dates)
                end_date = max(dates)
                input_time = [
                    str(beg_date[:4]) + '-' + str(beg_date[4:6]) + '-' +
                    str(beg_date[6:]),
                    str(end_date[:4]) + '-' + str(end_date[4:6]) + '-' +
                    str(end_date[6:])
                ]

    # Check for orbit num rather than time string
    if isinstance(input_time, builtins.list):
        if isinstance(input_time[0], int):
            input_time = orbit_time(input_time[0], input_time[1])
    elif isinstance(input_time, int):
        input_time = orbit_time(input_time)

    # Turn string input into datetime objects
    if isinstance(input_time, list):
        if len(input_time[0]) <= 10:
            input_time[0] = input_time[0] + ' 00:00:00'
        if len(input_time[1]) <= 10:
            input_time[1] = input_time[1] + ' 23:59:59'
        date1 = parse(input_time[0])
        date2 = parse(input_time[1])
    else:
        if len(input_time) <= 10:
            input_time += ' 00:00:00'
        date1 = parse(input_time)
        date2 = date1 + timedelta(days=1)

    date1_unix = calendar.timegm(date1.timetuple())
    date2_unix = calendar.timegm(date2.timetuple())

    # Grab insitu and iuvs files for the specified/created date ranges
    date_range_filenames = get_latest_files_from_date_range(date1, date2)
    date_range_iuvs_filenames = get_latest_iuvs_files_from_date_range(
        date1, date2)

    # Add date range files to respective file lists if desired
    if not specified_files_only:
        filenames.extend(date_range_filenames)
        iuvs_filenames.extend(date_range_iuvs_filenames)

    if not date_range_filenames and not date_range_iuvs_filenames:
        if not filenames and not iuvs_filenames:
            print(
                "No files found for the input date range, and no specific filenames were given. Exiting."
            )
            return

    # Going to look for files between time frames, but as we might have already specified
    # certain files to load in, we don't want to load them twice... so doing a check for that here
    filenames = list(set(filenames))
    iuvs_filenames = list(set(iuvs_filenames))

    kp_insitu = []
    if filenames:
        # Get column names
        names, inst = [], []
        crus_name, crus_inst = [], []
        c_found = False
        r_found = False
        for f in filenames:
            if kp_regex.match(os.path.basename(f)).group(
                    'description') == '_crustal' and not c_found:
                name, inss = get_header_info(f)
                # Strip off the first name for now (Time), and use that as the dataframe index.
                # Seems to make sense for now, but will it always?
                crus_name.extend(name[1:])
                crus_inst.extend(inss[1:])
                c_found = True
            elif kp_regex.match(os.path.basename(f)).group(
                    'description') == '' and not r_found:
                name, ins = get_header_info(f)
                # Strip off the first name for now (Time), and use that as the dataframe index.
                # Seems to make sense for now, but will it always?
                names.extend(name[1:])
                inst.extend(ins[1:])
                r_found = True
        all_names = names + crus_name
        all_inst = inst + crus_inst

        # Break up dictionary into instrument groups
        lpw_group, euv_group, swe_group, swi_group, sta_group, sep_group, mag_group, ngi_group, app_group, sc_group, \
        crus_group = [], [], [], [], [], [], [], [], [], [], []

        for i, j in zip(all_inst, all_names):
            if re.match('^LPW$', i.strip()):
                lpw_group.append(j)
            elif re.match('^LPW-EUV$', i.strip()):
                euv_group.append(j)
            elif re.match('^SWEA$', i.strip()):
                swe_group.append(j)
            elif re.match('^SWIA$', i.strip()):
                swi_group.append(j)
            elif re.match('^STATIC$', i.strip()):
                sta_group.append(j)
            elif re.match('^SEP$', i.strip()):
                sep_group.append(j)
            elif re.match('^MAG$', i.strip()):
                mag_group.append(j)
            elif re.match('^NGIMS$', i.strip()):
                ngi_group.append(j)
            elif re.match('^MODELED_MAG$', i.strip()):
                crus_group.append(j)
            elif re.match('^SPICE$', i.strip()):
                # NB Need to split into APP and SPACECRAFT
                if re.match('(.+)APP(.+)', j):
                    app_group.append(j)
                else:  # Everything not APP is SC in SPICE
                    # But do not include Orbit Num, or IO Flag
                    # Could probably stand to clean this line up a bit
                    if not re.match('(.+)(Orbit Number|Inbound Outbound Flag)',
                                    j):
                        sc_group.append(j)
            else:
                pass

        delete_groups = []
        if instruments is not None:
            if 'LPW' not in instruments and 'lpw' not in instruments:
                delete_groups += lpw_group
            if 'MAG' not in instruments and 'mag' not in instruments:
                delete_groups += mag_group
            if 'EUV' not in instruments and 'euv' not in instruments:
                delete_groups += euv_group
            if 'SWI' not in instruments and 'swi' not in instruments:
                delete_groups += swi_group
            if 'SWE' not in instruments and 'swe' not in instruments:
                delete_groups += swe_group
            if 'NGI' not in instruments and 'ngi' not in instruments:
                delete_groups += ngi_group
            if 'SEP' not in instruments and 'sep' not in instruments:
                delete_groups += sep_group
            if 'STA' not in instruments and 'sta' not in instruments:
                delete_groups += sta_group
            if 'MODELED_MAG' not in instruments and 'modeled_mag' not in instruments:
                delete_groups += crus_group

        # Read in all relevant data into a pandas dataframe called "temp"
        temp_data = []
        filenames.sort()
        for filename in filenames:
            # Determine number of header lines
            nheader = 0
            with open(filename) as f:
                for line in f:
                    if line.startswith('#'):
                        nheader += 1
                if kp_regex.match(os.path.basename(filename)).group(
                        'description') == '_crustal':
                    temp_data.append(
                        pd.read_fwf(filename,
                                    skiprows=nheader,
                                    index_col=0,
                                    widths=[19] + len(crus_name) * [16],
                                    names=crus_name))
                else:
                    temp_data.append(
                        pd.read_fwf(filename,
                                    skiprows=nheader,
                                    index_col=0,
                                    widths=[19] + len(names) * [16],
                                    names=names))
                for i in delete_groups:
                    del temp_data[-1][i]

        temp_unconverted = pd.concat(temp_data, axis=0, sort=True)

        # Need to convert columns
        # This is kind of a hack, but I can't figure out a better way for now

        if 'SWEA.Electron Spectrum Shape' in temp_unconverted and 'NGIMS.Density NO' in temp_unconverted:
            temp = temp_unconverted.astype(
                dtype={
                    'SWEA.Electron Spectrum Shape': np.float64,
                    'NGIMS.Density NO': np.float64
                })
        elif 'SWEA.Electron Spectrum Shape' in temp_unconverted and 'NGIMS.Density NO' not in temp_unconverted:
            temp = temp_unconverted.astype(
                dtype={'SWEA.Electron Spectrum Shape': np.float64})
        elif 'SWEA.Electron Spectrum Shape' not in temp_unconverted and 'NGIMS.Density NO' in temp_unconverted:
            temp = temp_unconverted.astype(
                dtype={'NGIMS.Density NO': np.float64})
        else:
            temp = temp_unconverted

        # Cut out the times not included in the date range
        time_unix = [
            calendar.timegm(
                datetime.strptime(i, '%Y-%m-%dT%H:%M:%S').timetuple())
            for i in temp.index
        ]
        start_index = 0
        for t in time_unix:
            if t >= date1_unix:
                break
            start_index += 1
        end_index = 0
        for t in time_unix:
            if t >= date2_unix:
                break
            end_index += 1

        # Assign the first-level only tags
        time_unix = time_unix[start_index:end_index]
        temp = temp[start_index:end_index]
        time = temp.index
        time_unix = pd.Series(time_unix)  # convert into Series for consistency
        time_unix.index = temp.index

        if 'SPICE.Orbit Number' in list(temp):
            orbit = temp['SPICE.Orbit Number']
        else:
            orbit = None
        if 'SPICE.Inbound Outbound Flag' in list(temp):
            io_flag = temp['SPICE.Inbound Outbound Flag']
        else:
            io_flag = None

        # Build the sub-level DataFrames for the larger dictionary/structure
        app = temp[app_group]
        spacecraft = temp[sc_group]
        if instruments is not None:
            if 'LPW' in instruments or 'lpw' in instruments:
                lpw = temp[lpw_group]
            else:
                lpw = None
            if 'MAG' in instruments or 'mag' in instruments:
                mag = temp[mag_group]
            else:
                mag = None
            if 'EUV' in instruments or 'euv' in instruments:
                euv = temp[euv_group]
            else:
                euv = None
            if 'SWE' in instruments or 'swe' in instruments:
                swea = temp[swe_group]
            else:
                swea = None
            if 'SWI' in instruments or 'swi' in instruments:
                swia = temp[swi_group]
            else:
                swia = None
            if 'NGI' in instruments or 'ngi' in instruments:
                ngims = temp[ngi_group]
            else:
                ngims = None
            if 'SEP' in instruments or 'sep' in instruments:
                sep = temp[sep_group]
            else:
                sep = None
            if 'STA' in instruments or 'sta' in instruments:
                static = temp[sta_group]
            else:
                static = None
            if 'MODELED_MAG' in instruments or 'modeled_mag' in instruments:
                crus = temp[crus_group]
            else:
                crus = None
        else:
            lpw = temp[lpw_group]
            euv = temp[euv_group]
            swea = temp[swe_group]
            swia = temp[swi_group]
            static = temp[sta_group]
            sep = temp[sep_group]
            mag = temp[mag_group]
            ngims = temp[ngi_group]
            crus = temp[crus_group]

        # Strip out the duplicated instrument part of the column names
        # (this is a bit hardwired and can be improved)
        for i in [
                lpw, euv, swea, swia, sep, static, ngims, mag, crus, app,
                spacecraft
        ]:
            if i is not None:
                i.columns = remove_inst_tag(i)

        if lpw is not None:
            lpw = lpw.rename(index=str, columns=param_dict)
        if euv is not None:
            euv = euv.rename(index=str, columns=param_dict)
        if swea is not None:
            swea = swea.rename(index=str, columns=param_dict)
        if swia is not None:
            swia = swia.rename(index=str, columns=param_dict)
        if sep is not None:
            sep = sep.rename(index=str, columns=param_dict)
        if static is not None:
            static = static.rename(index=str, columns=param_dict)
        if ngims is not None:
            ngims = ngims.rename(index=str, columns=param_dict)
        if mag is not None:
            mag = mag.rename(index=str, columns=param_dict)
        if crus is not None:
            crus = crus.rename(index=str, columns=param_dict)
        if app is not None:
            app = app.rename(index=str, columns=param_dict)
        if spacecraft is not None:
            spacecraft = spacecraft.rename(index=str, columns=param_dict)

        if orbit is not None and io_flag is not None:
            # Do not forget to save units
            # Define the list of first level tag names
            tag_names = [
                'TimeString', 'Time', 'Orbit', 'IOflag', 'LPW', 'EUV', 'SWEA',
                'SWIA', 'STATIC', 'SEP', 'MAG', 'NGIMS', 'MODELED_MAG', 'APP',
                'SPACECRAFT'
            ]

            # Define list of first level data structures
            data_tags = [
                time, time_unix, orbit, io_flag, lpw, euv, swea, swia, static,
                sep, mag, ngims, crus, app, spacecraft
            ]
        else:
            # Do not forget to save units
            # Define the list of first level tag names
            tag_names = [
                'TimeString', 'Time', 'LPW', 'EUV', 'SWEA', 'SWIA', 'STATIC',
                'SEP', 'MAG', 'NGIMS', 'MODELED_MAG', 'APP', 'SPACECRAFT'
            ]

            # Define list of first level data structures
            data_tags = [
                time, time_unix, lpw, euv, swea, swia, static, sep, mag, ngims,
                crus, app, spacecraft
            ]

        kp_insitu = OrderedDict(zip(tag_names, data_tags))

    # Now for IUVS
    kp_iuvs = []
    if not insitu_only and iuvs_filenames:
        for file in iuvs_filenames:
            kp_iuvs.append(read_iuvs_file(file))
    if not kp_iuvs:
        return tplot_varcreate(kp_insitu)
    else:
        return kp_insitu, kp_iuvs
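
A usage sketch (the date and instrument values are illustrative): load one day
of in situ data restricted to two instruments. With insitu_only=True and no
IUVS files read, the function returns the output of tplot_varcreate:

insitu_vars = maven_kp_to_tplot(input_time='2017-06-19',
                                instruments=['NGIMS', 'MAG'],
                                insitu_only=True)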
Example #29
hltb_unmatched_games = hltb_results[1]
hltb_not_found_games = hltb_results[2]
# hltb_matched_games = load_mocked_list()

google_results = match_google_sheets_data(google_creds,
                                          hltb_matched_games.keys())
eligible_games = google_results[0]
already_finished = google_results[1]

final_result = {}
for eligible_game in eligible_games:
    final_result[eligible_game] = hltb_matched_games[eligible_game]

print()
print('Results:')
for title, duration in OrderedDict(
        sorted(final_result.items(), key=lambda x: x[1])).items():
    print('{} ({})'.format(title, duration))
print()

print('{} of your games are already finished:'.format(len(already_finished)))
for entry in sorted(already_finished):
    print(entry)
print()

print('{} unmatched games:'.format(len(hltb_unmatched_games)))
for entry in hltb_unmatched_games:
    print(entry)
print()

print('{} games not found:'.format(len(hltb_not_found_games)))
for entry in hltb_not_found_games:
    print(entry)