Beispiel #1
0
def test_load_yaml():
    """vault.load_yaml must parse tests/test.yaml into the expected ordered mapping."""
    # Hand-built expectation mirroring the fixture file, key order included.
    expected = ordereddict([('image', ordereddict([('repository', 'nextcloud'), ('tag', '15.0.2-apache'), ('pullPolicy', 'IfNotPresent')])), ('nameOverride', ''), ('fullnameOverride', ''), ('replicaCount', 1), ('ingress', ordereddict([('enabled', True), ('annotations', ordereddict())])), ('nextcloud', ordereddict([('host', 'nextcloud.corp.justin-tech.com'), ('username', 'admin'), ('password', 'changeme')])), ('internalDatabase', ordereddict([('enabled', True), ('name', 'nextcloud')])), ('externalDatabase', ordereddict([('enabled', False), ('host', None), ('user', 'VAULT:/secret/testdata/user'), ('password', 'VAULT:/secret/{environment}/testdata/password'), ('database', 'nextcloud')])), ('mariadb', ordereddict([('enabled', True), ('db', ordereddict([('name', 'nextcloud'), ('user', 'nextcloud'), ('password', 'changeme')])), ('persistence', ordereddict([('enabled', True), ('storageClass', 'nfs-client'), ('accessMode', 'ReadWriteOnce'), ('size', '8Gi')]))])), ('service', ordereddict([('type', 'ClusterIP'), ('port', 8080), ('loadBalancerIP', 'nil')])), ('persistence', ordereddict([('enabled', True), ('storageClass', 'nfs-client'), ('accessMode', 'ReadWriteOnce'), ('size', '8Gi')])), ('resources', ordereddict()), ('nodeSelector', ordereddict()), ('tolerations', []), ('affinity', ordereddict())])
    parsed = vault.load_yaml("./tests/test.yaml")
    print(parsed)
    assert_equal(parsed, expected)
Beispiel #2
0
    def __init__(self, document, imageTypes=None):
        """Prepare image generation state for *document*.

        Builds the in-memory and on-disk caches, the unique-filename
        generator, and the start of the LaTeX source that images will be
        appended to.

        document -- the owning document; its ``config`` and ``userdata``
            (``'jobname'``, ``'working-dir'``) are read here.
        imageTypes -- optional list of image types; defaults to a
            single-element list holding ``self.fileExtension``.
        """
        self.config = document.config
        self.ownerDocument = document

        if imageTypes is None:
            self.imageTypes = [self.fileExtension]
        else:
            # Copy so later mutation by the caller cannot affect us.
            self.imageTypes = imageTypes[:]

        # Dictionary that makes sure each image is only generated once.
        # The key is the LaTeX source and the value is the image instance.
        self._cache = {}
        usednames = {}
        # JAM: FIXME: This writes into some arbitrary directory that may or
        # may not be related to the document we are processing. It at least
        # needs to take document name/path into consideration (see below on
        # graphicspath)
        self._filecache = os.path.abspath(
            os.path.join('.cache', self.__class__.__name__ + '.images'))

        # Names already present in the on-disk cache must not be reused by
        # the filename generator below.
        if self.config['images']['cache']:
            usednames = self._read_cache()

        # List of images in the order that they appear in the LaTeX file
        self.images = ordereddict()

        # Images that are simply copied from the source directory
        self.staticimages = ordereddict()

        # Filename generator
        self.newFilename = Filenames(
            self.config['images'].get('filenames', raw=True),
            vars={'jobname': document.userdata.get('jobname', '')},
            extension=self.fileExtension,
            invalid=usednames)

        # Start the document with a preamble
        self.source = StringIO()
        self.source.write('\\batchmode\n')
        self.writePreamble(document)
        self.source.write('\\begin{document}\n')

        #We inject \graphicspath here because this processing occurs in some temp space but the image urls
        #are relative to the original working directory.
        #Documentation suggests that we could just set the TEXINPUTS environment variable but it does not work
        self.source.write('\\graphicspath{{%s/}}\n' %
                          (self.ownerDocument.userdata['working-dir']))

        # Set up additional options
        self._configOptions = self.formatConfigOptions(self.config['images'])
Beispiel #3
0
    def __init__(self, document, imageTypes=None):
        """Prepare image generation state for *document*.

        Builds the in-memory and on-disk caches, the unique-filename
        generator, and the start of the LaTeX source that images will be
        appended to.

        document -- the owning document; its ``config`` and ``userdata``
            (``"jobname"``, ``"working-dir"``) are read here.
        imageTypes -- optional list of image types; defaults to a
            single-element list holding ``self.fileExtension``.
        """
        self.config = document.config
        self.ownerDocument = document

        if imageTypes is None:
            self.imageTypes = [self.fileExtension]
        else:
            # Copy so later mutation by the caller cannot affect us.
            self.imageTypes = imageTypes[:]

        # Dictionary that makes sure each image is only generated once.
        # The key is the LaTeX source and the value is the image instance.
        self._cache = {}
        usednames = {}
        # JAM: FIXME: This writes into some arbitrary directory that may or
        # may not be related to the document we are processing. It at least
        # needs to take document name/path into consideration (see below on
        # graphicspath)
        self._filecache = os.path.abspath(os.path.join(".cache", self.__class__.__name__ + ".images"))

        # Names already present in the on-disk cache must not be reused by
        # the filename generator below.
        if self.config["images"]["cache"]:
            usednames = self._read_cache()

        # List of images in the order that they appear in the LaTeX file
        self.images = ordereddict()

        # Images that are simply copied from the source directory
        self.staticimages = ordereddict()

        # Filename generator
        self.newFilename = Filenames(
            self.config["images"].get("filenames", raw=True),
            vars={"jobname": document.userdata.get("jobname", "")},
            extension=self.fileExtension,
            invalid=usednames,
        )

        # Start the document with a preamble
        self.source = StringIO()
        self.source.write("\\batchmode\n")
        self.writePreamble(document)
        self.source.write("\\begin{document}\n")

        # We inject \graphicspath here because this processing occurs in some temp space but the image urls
        # are relative to the original working directory.
        # Documentation suggests that we could just set the TEXINPUTS environment variable but it does not work
        self.source.write("\\graphicspath{{%s/}}\n" % (self.ownerDocument.userdata["working-dir"]))

        # Set up additional options
        self._configOptions = self.formatConfigOptions(self.config["images"])
Beispiel #4
0
 def read(cls, stream, index):
     """Read one record from *stream*: a name string index, then a u30 item
     count, then that many (key, value) string-index pairs.

     Returns a new ``cls`` instance with ``item_info`` set to the ordered
     key/value mapping.
     """
     record = cls(index.get_string(stream.read_u30()))
     pairs = []
     for _ in range(stream.read_u30()):
         # Key is read before value, preserving the original stream order.
         key = index.get_string(stream.read_u30())
         pairs.append((key, index.get_string(stream.read_u30())))
     record.item_info = ordereddict(pairs)
     return record
Beispiel #5
0
 def __init__(
     self,
     name: str,
     package_name: str,
     package_path: str,
     list_paths: _ListPaths,
     get_data: _GetData,
     save_data: Optional[_SaveData] = None,
     visible: Optional[bool] = True,
     editable: Optional[bool] = True,
     sort_value: Optional[int] = None,
 ):
     """Initialize the data source descriptor and validate it.

     name/package_name/package_path -- identity of the source.
     list_paths/get_data/save_data -- data-access callables.
     visible/editable/sort_value -- UI behaviour flags; ``editable`` is
         forced to False when settings.DISABLE_EDIT_DATA_SOURCES is set.
     Re-raises any failure after recording it on ``self.error``.
     """
     try:
         self.name = name
         self.package_name = package_name
         self.package_path = package_path
         self.visible = visible
         self.editable = False if settings.DISABLE_EDIT_DATA_SOURCES else editable
         self.sort_value = sort_value
         self.nodes = ordereddict()
         self.nodes_fully_loaded = False
         self.error = None
         self._list_paths = list_paths
         self._get_data = get_data
         self._save_data = save_data
         self._path_generator = None
         self._validate()
     except BaseException as e:
         # Record the failure on the instance before propagating it.
         # NOTE(review): BaseException is deliberately broad here so even
         # KeyboardInterrupt leaves self.error populated -- confirm intended.
         self.error = str(e)
         # BUGFIX: bare ``raise`` is the idiomatic re-raise; ``raise e``
         # added a redundant frame to the traceback.
         raise
def prepResult(title, array):
    """Return a formatted frequency table for the words of base.txt that
    appear in *array*.

    title -- heading centred between '*' banner characters.
    array -- collection of words to include; all other words are skipped.
    """
    # BUGFIX: ``with`` guarantees the file is closed even if FreqDist raises
    # (the original leaked the handle on any exception).
    with open("base.txt", "r") as arq_base:
        text = arq_base.read().lower().split()

    # Banner width per side; -(-a // 2) is ceil(a / 2), which is exactly how
    # many times the original ``while cont < chrs`` float loop iterated.
    stars = "*" * max(0, -(-(78 - len(title)) // 2))
    result = ("\n//" + stars + " " + title + " " + stars + "\\\\\n\n"
        "|                   Palavra                    |   |          Frequência           |\n\n")
    frequencia = FreqDist(text)
    # Order by descending frequency, ties broken alphabetically.
    frequencia_ord = ordereddict(sorted(frequencia.items(), key=lambda e: (-e[1], e[0])))

    # Dead locals from the original (enf2, lim, right) were removed; they
    # were computed but never used.
    for freq in frequencia_ord:
        if freq in array:
            # NOTE(review): the count's width is *added* to the padding here,
            # which widens rather than centres the row; preserved as-is.
            pad = " " * max(0, -(-(78 - (len(freq)) + len(str(frequencia_ord[freq]))) // 4))
            result += "|" + pad + freq + pad + " | " + pad + str(frequencia_ord[freq]) + pad + "|\n"

    result += "\n\\\\________________________________________________________________________________//\n\n"
    return result
Beispiel #7
0
 def __init__(self):
     """Start with an empty name/property/value/source record and no
     collected structures."""
     fields = ('name', 'property', 'value', 'source')
     self.structure_dict = ordereddict((key, '') for key in fields)
     self.list_of_structures = []
Beispiel #8
0
 def __init__(self):
     """Prepare empty holders: a per-device ``config`` record and the
     pool-level ``structure_dict`` summary (config/errors collect lists)."""
     self.config = ordereddict(
         (key, '') for key in ('name', 'state', 'read', 'write', 'cksum'))
     summary = [(key, '') for key in
                ('pool', 'state', 'scan', 'action', 'see', 'scrub', 'status')]
     summary.append(('config', []))
     summary.append(('errors', []))
     self.structure_dict = ordereddict(summary)
Beispiel #9
0
    def stoichiometry(self) -> Tuple['Formula', 'Formula', int]:
        """Reduce to unique stoichiometry using "chemical symbols" A, B, C, ...

        Examples
        --------
        >>> Formula('CO2').stoichiometry()
        (Formula('AB2'), Formula('CO2'), 1)
        >>> Formula('(H2O)4').stoichiometry()
        (Formula('AB2'), Formula('OH2'), 4)
        """
        reduced, repeat = self._reduce()
        abc = ordereddict()
        real = ordereddict()
        # Walk symbols ordered by (count, symbol), labelling them 'A', 'B', ...
        ranked = sorted((n, symb) for symb, n in reduced.items())
        for code, (n, symb) in enumerate(ranked, start=ord('A')):
            abc[chr(code)] = n
            real[symb] = n
        return self.from_dict(abc), self.from_dict(real), repeat
Beispiel #10
0
    def __format__(self, fmt: str) -> str:
        """Format Formula as str.

        Possible formats: ``'hill'``, ``'metal'``, ``'abc'``, ``'latex'``,
        ``'html'``, ``'rest'``.

        Example
        -------
        >>> f = Formula('OH2')
        >>> '{f}, {f:hill}, {f:latex}'.format(f=f)
        'OH2, H2O, OH$_{2}$'
        """

        if fmt == 'hill':
            # Hill order: C first, then H, then the rest alphabetically.
            remaining = self.count()
            ordered = ordereddict()
            for special in 'CH':
                if special in remaining:
                    ordered[special] = remaining.pop(special)
            for symb, n in sorted(remaining.items()):
                ordered[symb] = n
            return dict2str(ordered)

        if fmt == 'metal':
            # Metals (sorted) first, then the non-metals in sorted order.
            remaining = self.count()
            tail = [(s, remaining.pop(s)) for s in non_metals if s in remaining]
            head = [(s, remaining[s]) for s in sorted(remaining)]
            return dict2str(ordereddict(head + sorted(tail)))

        if fmt == 'abc':
            _, reduced, repeat = self.stoichiometry()
            return dict2str({symb: n * repeat for symb, n in reduced._count.items()})

        # Subscript markup pairs for the remaining named formats.
        markup = {'latex': ('$_{', '}$'),
                  'html': ('<sub>', '</sub>'),
                  'rest': (r'\ :sub`', r'`\ ')}
        if fmt in markup:
            left, right = markup[fmt]
            return self._tostr(left, right)
        if fmt == '':
            return self._formula
        raise ValueError('Invalid format specifier')
Beispiel #11
0
 def __init__(self):
     """Start with an empty per-device record (sizes, usage, speed, status)
     and no collected structures."""
     fields = ('name', 'full_size', 'm_size', 'full_size2',
               'usage', 'speed', 'status', 'other_part')
     self.structure_dict = ordereddict((key, '') for key in fields)
     self.list_of_structures = []
def prepResultCSV(title, array, spamwriter):
    """Write one CSV section via *spamwriter*: the *title* row, a header row,
    then a (word, frequency) row for every word of base.txt found in *array*.

    Rows are ordered by descending frequency, ties broken alphabetically.
    """
    # BUGFIX: ``with`` guarantees the file is closed even if FreqDist or the
    # writer raises (the original leaked the handle on any exception).
    with open("base.txt", "r") as arq_base:
        text = arq_base.read().lower().split()

    frequencia = FreqDist(text)
    frequencia_ord = ordereddict(
        sorted(frequencia.items(), key=lambda e: (-e[1], e[0])))

    spamwriter.writerow([title])
    spamwriter.writerow(["Palavra", "Frequência"])

    for freq in frequencia_ord:
        if freq in array:
            spamwriter.writerow([freq, str(frequencia_ord[freq])])
def get_configuration():
    """ Get the configuration from command line and config files

    :returns: dict -- keys 'global', 'logging' and 'tables'
    """
    # This is the dict we will return
    configuration = {
        'global': {},
        'logging': {},
        'tables': ordereddict()
    }

    # Read the command line options
    cmd_line_options = command_line_parser.parse()

    # If a configuration file is specified, read that as well
    conf_file_options = None
    if 'config' in cmd_line_options:
        conf_file_options = config_file_parser.parse(
            cmd_line_options['config'])

    # Extract global config
    configuration['global'] = __get_global_options(
        cmd_line_options,
        conf_file_options)

    # Extract logging config
    configuration['logging'] = __get_logging_options(
        cmd_line_options,
        conf_file_options)

    # Extract table configuration
    # If the --table cmd line option is set, it indicates that only table
    # options from the command line should be used
    if 'table_name' in cmd_line_options:
        configuration['tables'] = __get_cmd_table_options(cmd_line_options)

    # BUGFIX: guard against conf_file_options being None (no config file was
    # given); the original membership test raised TypeError in that case.
    elif conf_file_options and 'dynamodb_table' in conf_file_options:
        configuration['tables'] = __get_dynamodb_config_options(conf_file_options)

    else:
        configuration['tables'] = __get_config_table_options(conf_file_options)

    # Ensure some basic rules
    __check_gsi_rules(configuration)
    __check_logging_rules(configuration)
    __check_table_rules(configuration)

    return configuration
Beispiel #14
0
    def all_entries(self):
        """Return this event's entries (newest first), each carrying a
        ``victims`` list collected from its (Entry, EntryVictim) join rows."""
        rows = (
                app.db.session
                    .query(Entry, EntryVictim)
                    .filter(Entry.event == self.id,
                            EntryVictim.entry == Entry.id)
                    .order_by(Entry.date.desc(), Entry.id.desc())
                    .all())

        # Collapse the flat (entry, victim) pairs into one entry per id,
        # keeping the query order of first appearance.
        grouped = ordereddict()
        for entry, victim in rows:
            bucket = grouped.get(entry.id)
            if bucket is None:
                entry.victims = []
                grouped[entry.id] = entry
                bucket = entry
            bucket.victims.append(victim)

        return grouped.values()
Beispiel #15
0
def get_configuration():
    """Assemble the full configuration from the command line plus an optional
    config file.

    Returns a dict with 'global', 'logging' and 'tables' sections, after the
    basic rule checks have run.
    """
    cmd_line_options = command_line_parser.parse()

    # A config file is only parsed when one was named on the command line.
    conf_file_options = None
    if 'config' in cmd_line_options:
        conf_file_options = config_file_parser.parse(
            cmd_line_options['config'])

    configuration = {
        'global': __get_global_options(cmd_line_options, conf_file_options),
        'logging': __get_logging_options(cmd_line_options, conf_file_options),
        'tables': ordereddict(),
    }

    # --table on the command line takes precedence: only command-line table
    # options are used in that case.
    if 'table_name' in cmd_line_options:
        configuration['tables'] = __get_cmd_table_options(cmd_line_options)
    else:
        configuration['tables'] = __get_config_table_options(conf_file_options)

    # Validate the assembled configuration before handing it back.
    __check_gsi_rules(configuration)
    __check_logging_rules(configuration)
    __check_table_rules(configuration)

    return configuration
Beispiel #16
0
    def deserialize(self, overwrite_existing: bool = False) -> DataSource:
        """
        Returns the corresponding DataSource.
        The back end is treated as the universal source of truth UNLESS overwrite_existing is set to True.
        """
        # Unknown id: register and return the freshly created source.
        if self.id_ not in SOURCES:
            self._register_as_new_custom_source()
            return SOURCES[self.id_]

        if overwrite_existing:
            if not self.editable:
                raise HTTPException(status_code=400,
                                    detail="This source can not be edited")
            existing = SOURCES[self.id_].serialize()
            callables_changed = (self.list_paths != existing.list_paths
                                 or self.get_data != existing.get_data)
            if callables_changed:
                # The data-access callables changed, so any cached nodes are
                # stale: drop them and mark the node list as incomplete.
                SOURCES[self.id_].kill_all_nodes()
                self.nodes = ordereddict()
                self.nodes_fully_loaded = False
            self._register_as_new_custom_source()

        return SOURCES[self.id_]
Beispiel #17
0
Datei: h.py Projekt: posita/dyce
    def __init__(self, items: _SourceT) -> None:
        r"""Initialize the histogram's outcome counts from *items*.

        Accepted initializers (matching the branches below):

        * a non-zero integer ``n`` -- outcomes counting from ``n`` toward
          zero, each with count 1 (``0`` yields an empty histogram);
        * anything implementing ``HAbleT`` -- the result of its ``h()``;
        * a mapping of outcome -> count;
        * an iterable mixing outcomes and ``(outcome, count)`` tuples.

        Raises ``ValueError`` for any other type.
        """
        super().__init__()
        self._simple_init = None
        tmp: Counter[_OutcomeT] = counter()

        if isinstance(items, (int, Integral)):
            if items != 0:
                # Remember the integer shorthand -- presumably so the object
                # can be reproduced compactly later; confirm against callers.
                self._simple_init = items
                outcome_type = type(items)
                count_1 = type(items)(1)
                outcome_range = range(
                    items,
                    0,
                    1 if items < 0 else -1  # count toward zero
                )
                # Outcomes keep the exact integer subtype of ``items``.
                tmp.update({outcome_type(i): count_1 for i in outcome_range})
        elif isinstance(items, HAbleT):
            tmp.update(items.h())
        elif isinstance(items, ABCMapping):
            tmp.update(items)
        elif isinstance(items, ABCIterable):
            # Items is either an Iterable[_OutcomeT] or an Iterable[Tuple[_OutcomeT,
            # _CountT]] (although this technically supports Iterable[Union[_OutcomeT,
            # Tuple[_OutcomeT, _CountT]]])
            for item in items:
                if isinstance(item, tuple):
                    outcome, count = item
                    tmp[outcome] += count
                else:
                    tmp[item] += 1
        else:
            raise ValueError("unrecognized initializer {}".format(type(items)))

        # Sort and omit zero counts. We use an OrderedDict instead of a Counter to
        # support Python versions earlier than 3.7 which did not guarantee order
        # preservation for the latter.
        self._h: _MappingT = ordereddict(
            {outcome: tmp[outcome]
             for outcome in sorted(tmp) if tmp[outcome]})
Beispiel #18
0
def parse(config_path):
    """ Parse the configuration file

    :type config_path: str
    :param config_path: Path to the configuration file
    :returns: ordereddict -- merged global, logging and table configuration
    """
    config_path = os.path.expanduser(config_path)

    # Read the configuration file
    config_file = ConfigParser.RawConfigParser()
    config_file.SECTCRE = re.compile(r"\[ *(?P<header>.*) *\]")
    config_file.optionxform = lambda option: option
    config_file.read(config_path)

    # BUGFIX: default these so the final merge does not raise NameError when
    # the [global] or [logging] section is missing (the original only
    # assigned them inside the ``if`` blocks below).
    global_config = {}
    logging_config = {}

    #
    # Handle [global]
    #
    if 'global' in config_file.sections():
        global_config = __parse_options(
            config_file,
            'global',
            [
                {
                    'key': 'aws_access_key_id',
                    'option': 'aws-access-key-id',
                    'required': False,
                    'type': 'str'
                },
                {
                    'key': 'aws_secret_access_key',
                    'option': 'aws-secret-access-key-id',
                    'required': False,
                    'type': 'str'
                },
                {
                    'key': 'region',
                    'option': 'region',
                    'required': False,
                    'type': 'str'
                },
                {
                    'key': 'check_interval',
                    'option': 'check-interval',
                    'required': False,
                    'type': 'int'
                },
                {
                    'key': 'circuit_breaker_url',
                    'option': 'circuit-breaker-url',
                    'required': False,
                    'type': 'str'
                },
                {
                    'key': 'circuit_breaker_timeout',
                    'option': 'circuit-breaker-timeout',
                    'required': False,
                    'type': 'float'
                },
            ])

    #
    # Handle [logging]
    #
    if 'logging' in config_file.sections():
        logging_config = __parse_options(
            config_file,
            'logging',
            [
                {
                    'key': 'log_level',
                    'option': 'log-level',
                    'required': False,
                    'type': 'str'
                },
                {
                    'key': 'log_file',
                    'option': 'log-file',
                    'required': False,
                    'type': 'str'
                },
                {
                    'key': 'log_config_file',
                    'option': 'log-config-file',
                    'required': False,
                    'type': 'str'
                }
            ])

    if 'default_options' in config_file.sections():
        # nothing is required in defaults, so we set required to False
        default_config_options = deepcopy(TABLE_CONFIG_OPTIONS)
        for item in default_config_options:
            item['required'] = False
        default_options = __parse_options(
            config_file, 'default_options', default_config_options)
        # if we've got a default set required to be false for table parsing
        for item in TABLE_CONFIG_OPTIONS:
            if item['key'] in default_options:
                item['required'] = False
    else:
        default_options = {}

    #
    # Handle [table: ]
    #
    table_config = {'tables': ordereddict()}

    # Find the first table definition
    found_table = False
    for current_section in config_file.sections():
        if current_section.rsplit(':', 1)[0] != 'table':
            continue

        found_table = True
        current_table_name = current_section.rsplit(':', 1)[1].strip()
        # NOTE: concatenating dict.items() results is Python 2 behaviour;
        # Python 3 would need list() wrappers around both operands.
        table_config['tables'][current_table_name] = \
            dict(default_options.items() + __parse_options(
                config_file, current_section, TABLE_CONFIG_OPTIONS).items())

    if not found_table:
        print('Could not find a [table: <table_name>] section in {0}'.format(
            config_path))
        sys.exit(1)

    # Find gsi definitions - this allows gsi's to be defined before the table
    # definitions we don't worry about parsing everything twice here
    for current_section in config_file.sections():
        try:
            header1, gsi_key, header2, table_key = current_section.split(' ')
        except ValueError:
            continue

        if header1 != 'gsi:':
            continue

        if table_key not in table_config['tables']:
            print('No table configuration matching {0} found.'.format(
                table_key))
            sys.exit(1)

        if 'gsis' not in table_config['tables'][table_key]:
            table_config['tables'][table_key]['gsis'] = {}

        table_config['tables'][table_key]['gsis'][gsi_key] = \
            ordereddict(default_options.items() + __parse_options(
                config_file, current_section, TABLE_CONFIG_OPTIONS).items())

    return ordereddict(
        global_config.items() +
        logging_config.items() +
        table_config.items())
### EVALUATING RETWEETS ###
# Count how often each "rt @user" pair from base.txt occurs, restricted to
# the known retweet targets in ``rts`` (defined elsewhere in this script).
arq_base = open("base.txt","r")
text = arq_base.readlines()
freq_rts = defaultdict(int)

for stmt in text:
    words = stmt.lower().split()
    i = 0
    # Only lines that start with "rt" are treated as retweets.
    if(stmt[0:2].lower() == 'rt'):
        for word in words:
            if(word == 'rt'):
                # Join 'rt' with the following token ("rt @user").
                # NOTE(review): assumes 'rt' is never the last token on the
                # line; ``words[i + 1]`` would raise IndexError otherwise.
                word = words[i] + " " + words[i +1]
                if(word in rts):
                    freq_rts[word] += 1
            i += 1
spamwriter.writerow(["RETWEETS"])
spamwriter.writerow(["Palavra", "Frequência"])
# Descending frequency, ties broken alphabetically.
freq_rts_ord = ordereddict(sorted(freq_rts.items(), key = lambda e: (-e[1], e[0])))
for freq in freq_rts_ord:
    spamwriter.writerow([freq, str(freq_rts_ord[freq])])
arq_base.close()
### END EVALUATING RETWEETS ###

# Emit the remaining CSV sections; the word lists (mentions, verbos,
# stop_words, exprs) are defined elsewhere in this script.
prepResultCSV("MENTIONS", mentions, spamwriter)
prepResultCSV("VERBOS", verbos, spamwriter)
prepResultCSV("STOP WORDS", stop_words, spamwriter)
prepResultCSV("DEMAIS EXPRESSÕES", exprs, spamwriter)

arq.close()
arq_results_csv.close()
print("Processamento Finalizado!")
Beispiel #20
0
 def __init__(self, name, items=()):
     """Record *name* and build the ordered item mapping from *items*."""
     self.item_info = ordereddict(items)
     self.name = name
Beispiel #21
0
def make_class_timetable_array(klass, lessons):
    """Build a 2-D array (list of rows) holding *klass*'s weekly timetable.

    klass -- class name, placed in a caption row at the top.
    lessons -- mapping of day -> that day's lessons; each lesson exposes
        ORA_INIZIO, MAT_COD, DOC_COGN (and DOC_NOME for debugging).
    Returns the rows transposed so each row is one time slot across days.

    NOTE(review): this relies on a module-level ``START_TIMES`` global -- the
    local definition below is commented out; confirm it exists at module scope.
    """

    # Load abbreviations for subjects: "Inglese" instead "Lingua e
    # Cultura Straniera Inglese" as description for ING).

    mat_names = get_mat_names()

    # print(f"\n=== {klass} ====================")
    rr = list()
    # dd = ["Ora"] + [d[:3].upper() for d in DAYS_SHIFT]

    # START_TIMES = "07h50 08h40 09h30 10h30 11h20 12h15 13h10 14h00".split()

    # NOTE(review): this list literal is dead -- it is immediately replaced by
    # the ordereddict below.
    END_TIMES = [  # BUG: stupid name!
        "8:00 - 8:40",
        "8:50 - 9:30",
        "9:40 - 10:20",
        "10:40 - 11:20",
        "11:30 - 12:10",
        "12:20 - 13:00",
        "13:10 - 14:00",
        "14:00 - 14:40",
    ]
    # Maps raw start-time codes to the two-line slot labels shown in column 0.
    END_TIMES = ordereddict((  # BUG: stupid name!
        ("07h50", "8:00\n 8:40"),
        ("08h40", "8:50\n 9:30"),
        ("09h30", "9:40\n10:20"),
        ("10h30", "10:40\n11:20"),
        ("11h20", "11:30\n12:10"),
        ("12h15", "12:20\n13:00"),
        ("13h10", "13:10\n14:00"),
        ("14h00", "14:00\n14:40"),
    ))
    # BUG
    # hh = [""] + [s.replace("h", ".").lstrip("0") for s in START_TIMES]
    hh = [""] + list(END_TIMES.values())
    rr.append(hh)

    for day, lessons in sorted(lessons.items(), key=day_sorter):
        r = [day.capitalize()]
        # BUG:
        # Here I'm looping on lessons but it is important to also
        # produce lines (hour) where this class has no lesson (e.g.:
        # this class start the day with the second hour on monday).
        # for o in sorted(lessons, key=start_sorter):
        #     mat = mat_names[o.MAT_COD]             # ING -> Inglese
        #     r.append(f"{mat.strip()}\n{o.DOC_COGN.strip()}")

        # Bucket this day's lessons by start time, keeping empty slots.
        foo = ordereddict()
        # for st in START_TIMES:  # prefill with all start times!
        #     foo[st] = None
        # for less in lessons:
        #     foo[less.ORA_INIZIO] = less
        for st in START_TIMES:  # prefill with all start times!
            foo[st] = list()
        for less in lessons:
            foo[less.ORA_INIZIO].append(less)

        for index, (st, oo) in enumerate(foo.items()):
            if not oo:  # no lessons at this time
                r.append("")
            else:
                mm = list()
                try:
                    for o in oo:
                        mat = mat_names[o.MAT_COD]  # ING -> Inglese
                        mat = o.MAT_COD  # .capitalize()
                        mm.append(mat)
                except KeyError as e:
                    # NOTE(review): ``foo`` (the slot dict) is rebound to a
                    # debug string here; iteration is unaffected because the
                    # items() view was captured above, but it is confusing.
                    foo = f"{index=},{o.MAT_COD=},{o.DOC_NOME=}"
                    debug(f"make_class_timetable_array ({foo}) {e}")
                    continue
                mat = "/".join(mm).strip()
                # if len(oo) > 1:
                #     debug(f"MULTIPLE {mat}")
                pp = list()
                for o in oo:
                    pp.append(o.DOC_COGN.strip())
                prof = "/".join(pp)
                # BUG: single MAT only ???
                r.append(f"{mat}\n{prof}\nsincrona")
        rr.append(r)

    # Transpose: rows become time slots, columns become days.
    rr = list(zip(*rr, fillvalue=""))
    kk = [""] * 3 + [klass] + [""] * 3
    rr.insert(0, [""] * len(kk))
    rr.insert(1, kk)
    return rr
Beispiel #22
0
 def __init__(self):
     """Start with an empty name/uniq record and no disk mappings."""
     self.structure_dict = ordereddict((key, '') for key in ('name', 'uniq'))
     self.disk_map = []
def __get_dynamodb_config_options(conf_file_options):
    """ Get all table options from DynamoDb table

    In order to use this feature your IAM User or Role will need to have access to Read (Scan) the
    table that contains the data. For security it is recommended that there is a separate statement
    added to the existing policy that limits read to that table. See below

    {
      "Version": "2012-10-17",
      "Statement": [
        {
          "Effect": "Allow",
          "Action": "dynamodb:Scan",
          "Resource": "arn:aws:dynamodb:eu-west-1:1234567890:table/dynamic-dynamodb-config-table"
        }
      ]
    }

    The table must have the following field defined:
    table_name : This is the name of the table to be processed

    :type conf_file_options: dict
    :param conf_file_options: Dictionary with all config file options
    :returns: ordereddict -- E.g. {'table_name': {}}
    """
    options = ordereddict()

    config_table_name = conf_file_options['dynamodb_table']

    # todo :- this is a HACK : Sort it out !!!! This should be using the logic in aws.dynamodb
    conn = __get_connection_dynamodb(conf_file_options=conf_file_options)
    config_table = Table(table_name=config_table_name, connection=conn)

    #
    # This needs to be aware that the results of the scan may be paginated
    #
    for row in config_table.scan():

        table_name = row['table_name']
        options[table_name] = {}

        # Regular table options: start from the default, then overlay any
        # value present in the DynamoDB row.
        for option in DEFAULT_OPTIONS['table'].keys():

            options[table_name][option] = DEFAULT_OPTIONS['table'][option]

            if option not in row:
                continue

            if option == 'sns_message_types':

                try:
                    raw_list = row[option]
                    options[table_name][option] = [i.strip() for i in raw_list.split(',')]

                # BUGFIX: narrowed from a bare ``except:`` so that
                # KeyboardInterrupt/SystemExit are no longer swallowed.
                except Exception:
                    print(
                        'Error parsing the "sns-message-types" option: {0}'.format(row[option]))

            else:
                options[table_name][option] = row[option]

    return options
Beispiel #24
0
def main(argv=None):
    """
    Run amplimap setup wizard.

    :param argv: optional argument list (defaults to sys.argv[1:] via argparse)
    :returns: 1 on error or interrupt
    """
    try:
        basedir = os.path.dirname(os.path.realpath(__file__))

        # parse the arguments, which will be available as properties of args (e.g. args.probe)
        parser = argparse.ArgumentParser(
            description="amplimap v{} setup wizard".format(__version__),
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)

        # specify parameters
        # BUGFIX: ``action="store_true"`` is invalid on a positional argument
        # (argparse rejects zero-nargs store actions) and would have discarded
        # the operation string anyway; a plain positional is intended.
        parser.add_argument("operation",
                            help="type of setup to perform: paths / indices")
        parser.add_argument("--debug", help="debug mode", action="store_true")
        if argv is None:
            args = parser.parse_args()
        else:
            args = parser.parse_args(argv)

        # TODO: the wizard body below is unreachable until this is removed.
        raise Exception('Not implemented yet.')

        if args.operation == 'paths':
            # TODO:
            # - ask aligner, caller
            # - ask about modules:
            # - check path for:
            # aligner, caller, bedtools, samtools (error if not found)
            # annovar, picard, bcftools (warning if not found)

            # BUGFIX: the collections module exposes OrderedDict, not
            # ``ordereddict`` (which raised AttributeError).
            answers = collections.OrderedDict()
            for key, q in questions.items():
                answers[key] = q.ask()

            tools_required = ['bedtools', 'samtools']

            tools_for_variants = ['annovar', 'bcftools']

            # BUGFIX: each aligner adds its own binary; the original appended
            # 'bwa' in all three branches.
            if answers['aligner'] == 'bwa':
                tools_required.append('bwa')
            elif answers['aligner'] == 'bowtie2':
                tools_required.append('bowtie2')
            elif answers['aligner'] == 'star':
                tools_required.append('star')
        elif args.operation == 'indices':
            # TODO:
            # - ask path to reference genome
            # - build fasta index, default aligner index
            # - ask to enter name of other index to create (bwa, bowtie2, star)
            pass
        else:
            raise Exception('Please specify a valid operation!')
    except KeyboardInterrupt:
        # BUGFIX: ``KeyboardInterruptException`` does not exist; evaluating
        # that name during exception matching raised NameError and masked
        # the real error.
        sys.stderr.write('Interrupted.\n')
        return 1
    except EOFError:
        sys.stderr.write('Aborted.\n')
        return 1
    except Exception as e:
        sys.stderr.write('\nERROR: {}\n\n'.format(e))
        sys.stderr.write('{} {} failed!\n'.format(__title__, __version__))
        return 1
Beispiel #25
0
def __get_config_table_options(conf_file_options):
    """ Get all table options from the config file

    Each table starts from DEFAULT_OPTIONS['table'] and is overridden by
    values found in the config file; GSI options work the same way based
    on DEFAULT_OPTIONS['gsi'].

    :type conf_file_options: ordereddict
    :param conf_file_options: Dictionary with all config file options
    :returns: ordereddict -- E.g. {'table_name': {}}
    """
    options = ordereddict()

    # No parsed configuration -> no table options
    if not conf_file_options:
        return options

    for table_name in conf_file_options['tables']:
        options[table_name] = {}

        # Regular table options
        for option in DEFAULT_OPTIONS['table'].keys():
            # Start with the default value; override below if configured
            options[table_name][option] = DEFAULT_OPTIONS['table'][option]

            if option not in conf_file_options['tables'][table_name]:
                continue

            if option == 'sns_message_types':
                # The configured value is a comma separated string; turn
                # it into a list of stripped message type names
                try:
                    raw_list = conf_file_options['tables'][table_name][option]
                    options[table_name][option] = \
                        [i.strip() for i in raw_list.split(',')]
                except AttributeError:
                    # Value is not a string (no .split); keep the default
                    print('Error parsing the "sns-message-types" '
                          'option: {0}'.format(
                              conf_file_options['tables'][table_name][option]))
            else:
                options[table_name][option] = \
                    conf_file_options['tables'][table_name][option]

        # GSI specific options
        if 'gsis' in conf_file_options['tables'][table_name]:
            for gsi_name in conf_file_options['tables'][table_name]['gsis']:
                for option in DEFAULT_OPTIONS['gsi'].keys():
                    opt = DEFAULT_OPTIONS['gsi'][option]

                    # Make sure the nested result dicts exist
                    if 'gsis' not in options[table_name]:
                        options[table_name]['gsis'] = {}

                    if gsi_name not in options[table_name]['gsis']:
                        options[table_name]['gsis'][gsi_name] = {}

                    if (option not in conf_file_options['tables'][table_name]
                        ['gsis'][gsi_name]):
                        # Not configured for this GSI; use the default
                        options[table_name]['gsis'][gsi_name][option] = opt
                        continue

                    if option == 'sns_message_types':
                        # Comma separated string -> list of stripped names
                        try:
                            raw_list = conf_file_options['tables'][table_name][
                                'gsis'][gsi_name][option]
                            opt = [i.strip() for i in raw_list.split(',')]
                        except AttributeError:
                            # Value is not a string; keep the default
                            print('Error parsing the "sns-message-types" '
                                  'option: {0}'.format(
                                      conf_file_options['tables'][table_name]
                                      ['gsis'][gsi_name][option]))
                    else:
                        opt = conf_file_options['tables'][table_name]['gsis'][
                            gsi_name][option]

                    options[table_name]['gsis'][gsi_name][option] = opt

    return options
def __get_config_table_options(conf_file_options):
    """ Get all table options from the config file

    Each table starts from DEFAULT_OPTIONS['table'] and is overridden by
    values found in the config file; GSI options work the same way based
    on DEFAULT_OPTIONS['gsi'].

    :type conf_file_options: ordereddict
    :param conf_file_options: Dictionary with all config file options
    :returns: ordereddict -- E.g. {'table_name': {}}
    """
    options = ordereddict()

    # No parsed configuration -> no table options
    if not conf_file_options:
        return options

    for table_name in conf_file_options['tables']:
        options[table_name] = {}

        # Regular table options
        for option in DEFAULT_OPTIONS['table'].keys():
            # Start with the default value; override below if configured
            options[table_name][option] = DEFAULT_OPTIONS['table'][option]

            if option not in conf_file_options['tables'][table_name]:
                continue

            if option == 'sns_message_types':
                # The configured value is a comma separated string; turn
                # it into a list of stripped message type names
                try:
                    raw_list = conf_file_options['tables'][table_name][option]
                    options[table_name][option] = \
                        [i.strip() for i in raw_list.split(',')]
                except AttributeError:
                    # Value is not a string (no .split); keep the default
                    print(
                        'Error parsing the "sns-message-types" '
                        'option: {0}'.format(
                            conf_file_options['tables'][table_name][option]))
            else:
                options[table_name][option] = \
                    conf_file_options['tables'][table_name][option]

        # GSI specific options
        if 'gsis' in conf_file_options['tables'][table_name]:
            for gsi_name in conf_file_options['tables'][table_name]['gsis']:
                for option in DEFAULT_OPTIONS['gsi'].keys():
                    opt = DEFAULT_OPTIONS['gsi'][option]

                    # Make sure the nested result dicts exist
                    if 'gsis' not in options[table_name]:
                        options[table_name]['gsis'] = {}

                    if gsi_name not in options[table_name]['gsis']:
                        options[table_name]['gsis'][gsi_name] = {}

                    if (option not in conf_file_options[
                            'tables'][table_name]['gsis'][gsi_name]):
                        # Not configured for this GSI; use the default
                        options[table_name]['gsis'][gsi_name][option] = opt
                        continue

                    if option == 'sns_message_types':
                        # Comma separated string -> list of stripped names
                        try:
                            raw_list = conf_file_options[
                                'tables'][table_name]['gsis'][gsi_name][option]
                            opt = [i.strip() for i in raw_list.split(',')]
                        except AttributeError:
                            # Value is not a string; keep the default
                            print(
                                'Error parsing the "sns-message-types" '
                                'option: {0}'.format(
                                    conf_file_options[
                                        'tables'][table_name][
                                            'gsis'][gsi_name][option]))
                    else:
                        opt = conf_file_options[
                            'tables'][table_name]['gsis'][gsi_name][option]

                    options[table_name]['gsis'][gsi_name][option] = opt

    return options
Beispiel #27
0
from dtale_desktop.pydantic_utils import BaseApiModel
from dtale_desktop.settings import settings
from dtale_desktop.source_code_tools import (
    get_source_file,
    create_package_name,
    create_data_source_package,
    move_data_source_package,
    load_data_source_package,
)
from dtale_desktop.subprocesses import execute_profile_report_builder

# Callback signatures a DataSource implementation provides; each may be
# either synchronous or awaitable where the alias allows it.
_ListPaths = Callable[..., Union[List[str], Awaitable[List[str]]]]
_GetData = Callable[[str], Union[pd.DataFrame, Awaitable[pd.DataFrame]]]
_SaveData = Callable[[str, pd.DataFrame], None]

# Registry of all known data sources (insertion-ordered).
# NOTE(review): presumably keyed by the source's package name -- confirm
# against the registration code, which is not visible here.
SOURCES: Dict[str, "DataSource"] = ordereddict()


class DataSource:
    """Declaration of the attributes a data source carries.

    NOTE(review): only the attribute annotations are visible in this
    chunk; the methods (and the meaning of ``_list_paths`` / ``_get_data``
    / ``_save_data`` beyond their signatures) live elsewhere in the file.
    """

    # Display name of the source
    name: str
    # Name of the generated package for this source (see
    # create_package_name in the imports above) -- TODO confirm
    package_name: str
    # Filesystem path of that package -- presumably; verify against usage
    package_path: str
    # Whether the source is shown in the UI
    visible: bool
    # Whether the source can be edited
    editable: bool
    # Ordering hint when listing sources
    sort_value: int
    # Child nodes keyed by identifier
    nodes: Dict[str, "Node"]
    # True once every node has been listed/loaded
    nodes_fully_loaded: bool
    # Last error message, if any
    error: Optional[str]
    # User-supplied callbacks (see the aliases defined above the registry)
    _list_paths: _ListPaths
    _get_data: _GetData
    _save_data: Optional[_SaveData]
def parse(config_path):
    """ Parse the configuration file

    Reads [global], [logging], [default_options], [table: <name>] and
    [gsi: <gsi> table: <name>] sections and merges them into one mapping.

    :type config_path: str
    :param config_path: Path to the configuration file
    :returns: ordereddict -- Merged global, logging and table configuration
    """
    config_path = os.path.expanduser(config_path)

    # Read the configuration file. optionxform is overridden so option
    # names keep their case (the default would lowercase them).
    config_file = ConfigParser.RawConfigParser()
    config_file.optionxform = lambda option: option
    config_file.read(config_path)

    # Default both optional sections to empty mappings so the merge at
    # the bottom does not raise UnboundLocalError when [global] or
    # [logging] is missing from the file.
    global_config = {}
    logging_config = {}

    #
    # Handle [global]
    #
    if 'global' in config_file.sections():
        global_config = __parse_options(
            config_file,
            'global',
            [
                {
                    'key': 'aws_access_key_id',
                    'option': 'aws-access-key-id',
                    'required': False,
                    'type': 'str'
                },
                {
                    'key': 'aws_secret_access_key',
                    'option': 'aws-secret-access-key-id',
                    'required': False,
                    'type': 'str'
                },
                {
                    'key': 'region',
                    'option': 'region',
                    'required': False,
                    'type': 'str'
                },
                {
                    'key': 'check_interval',
                    'option': 'check-interval',
                    'required': False,
                    'type': 'int'
                },
                {
                    'key': 'circuit_breaker_url',
                    'option': 'circuit-breaker-url',
                    'required': False,
                    'type': 'str'
                },
                {
                    'key': 'circuit_breaker_timeout',
                    'option': 'circuit-breaker-timeout',
                    'required': False,
                    'type': 'float'
                },
            ])

    #
    # Handle [logging]
    #
    if 'logging' in config_file.sections():
        logging_config = __parse_options(
            config_file,
            'logging',
            [
                {
                    'key': 'log_level',
                    'option': 'log-level',
                    'required': False,
                    'type': 'str'
                },
                {
                    'key': 'log_file',
                    'option': 'log-file',
                    'required': False,
                    'type': 'str'
                },
                {
                    'key': 'log_config_file',
                    'option': 'log-config-file',
                    'required': False,
                    'type': 'str'
                }
            ])

    if 'default_options' in config_file.sections():
        # nothing is required in defaults, so we set required to False
        default_config_options = deepcopy(TABLE_CONFIG_OPTIONS)
        for item in default_config_options:
            item['required'] = False
        default_options = __parse_options(
            config_file, 'default_options', default_config_options)
        # if we've got a default set required to be false for table parsing
        for item in TABLE_CONFIG_OPTIONS:
            if item['key'] in default_options:
                item['required'] = False
    else:
        default_options = {}

    #
    # Handle [table: ]
    #
    table_config = {'tables': ordereddict()}

    # Find the first table definition
    found_table = False
    for current_section in config_file.sections():
        if current_section.rsplit(':', 1)[0] != 'table':
            continue

        found_table = True
        current_table_name = current_section.rsplit(':', 1)[1].strip()
        # NOTE: dict.items() concatenation relies on Python 2 list
        # semantics (this module uses the Python 2 ConfigParser import)
        table_config['tables'][current_table_name] = \
            dict(default_options.items() + __parse_options(
                config_file, current_section, TABLE_CONFIG_OPTIONS).items())

    if not found_table:
        print('Could not find a [table: <table_name>] section in {0}'.format(
            config_path))
        sys.exit(1)

    # Find gsi definitions - this allows gsi's to be defined before the table
    # definitions we don't worry about parsing everything twice here
    for current_section in config_file.sections():
        # Expected form: "gsi: <gsi_key> table: <table_key>"
        try:
            header1, gsi_key, header2, table_key = current_section.split(' ')
        except ValueError:
            continue

        if header1 != 'gsi:':
            continue

        if table_key not in table_config['tables']:
            print('No table configuration matching {0} found.'.format(
                table_key))
            sys.exit(1)

        if 'gsis' not in table_config['tables'][table_key]:
            table_config['tables'][table_key]['gsis'] = {}

        table_config['tables'][table_key]['gsis'][gsi_key] = \
            ordereddict(default_options.items() + __parse_options(
                config_file, current_section, TABLE_CONFIG_OPTIONS).items())

    return ordereddict(
        global_config.items() +
        logging_config.items() +
        table_config.items())
def parse(config_path):
    """ Parse the configuration file

    Reads [global], [logging], [default_options], [table: <name>] and
    [gsi: <gsi> table: <name>] sections and merges them into one mapping.

    :type config_path: str
    :param config_path: Path to the configuration file
    :returns: ordereddict -- Merged global, logging and table configuration
    """
    config_path = os.path.expanduser(config_path)

    # Read the configuration file. optionxform is overridden so option
    # names keep their case (the default would lowercase them).
    config_file = ConfigParser.RawConfigParser()
    config_file.optionxform = lambda option: option
    config_file.read(config_path)

    # Default both optional sections to empty mappings so the merge at
    # the bottom does not raise UnboundLocalError when [global] or
    # [logging] is missing from the file.
    global_config = {}
    logging_config = {}

    #
    # Handle [global]
    #
    if "global" in config_file.sections():
        global_config = __parse_options(
            config_file,
            "global",
            [
                {"key": "aws_access_key_id", "option": "aws-access-key-id", "required": False, "type": "str"},
                {
                    "key": "aws_secret_access_key",
                    "option": "aws-secret-access-key-id",
                    "required": False,
                    "type": "str",
                },
                {"key": "region", "option": "region", "required": False, "type": "str"},
                {"key": "check_interval", "option": "check-interval", "required": False, "type": "int"},
                {"key": "circuit_breaker_url", "option": "circuit-breaker-url", "required": False, "type": "str"},
                {
                    "key": "circuit_breaker_timeout",
                    "option": "circuit-breaker-timeout",
                    "required": False,
                    "type": "float",
                },
            ],
        )

    #
    # Handle [logging]
    #
    if "logging" in config_file.sections():
        logging_config = __parse_options(
            config_file,
            "logging",
            [
                {"key": "log_level", "option": "log-level", "required": False, "type": "str"},
                {"key": "log_file", "option": "log-file", "required": False, "type": "str"},
                {"key": "log_config_file", "option": "log-config-file", "required": False, "type": "str"},
            ],
        )

    if "default_options" in config_file.sections():
        # nothing is required in defaults, so we set required to False
        default_config_options = deepcopy(TABLE_CONFIG_OPTIONS)
        for item in default_config_options:
            item["required"] = False
        default_options = __parse_options(config_file, "default_options", default_config_options)
        # if we've got a default set required to be false for table parsing
        for item in TABLE_CONFIG_OPTIONS:
            if item["key"] in default_options:
                item["required"] = False
    else:
        default_options = {}

    #
    # Handle [table: ]
    #
    table_config = {"tables": ordereddict()}

    # Find the first table definition
    found_table = False
    for current_section in config_file.sections():
        if current_section.rsplit(":", 1)[0] != "table":
            continue

        found_table = True
        current_table_name = current_section.rsplit(":", 1)[1].strip()
        # NOTE: dict.items() concatenation relies on Python 2 list
        # semantics (this module uses the Python 2 ConfigParser import)
        table_config["tables"][current_table_name] = dict(
            default_options.items() + __parse_options(config_file, current_section, TABLE_CONFIG_OPTIONS).items()
        )

    if not found_table:
        print("Could not find a [table: <table_name>] section in {0}".format(config_path))
        sys.exit(1)

    # Find gsi definitions - this allows gsi's to be defined before the table
    # definitions we don't worry about parsing everything twice here
    for current_section in config_file.sections():
        # Expected form: "gsi: <gsi_key> table: <table_key>"
        try:
            header1, gsi_key, header2, table_key = current_section.split(" ")
        except ValueError:
            continue

        if header1 != "gsi:":
            continue

        if table_key not in table_config["tables"]:
            print("No table configuration matching {0} found.".format(table_key))
            sys.exit(1)

        if "gsis" not in table_config["tables"][table_key]:
            table_config["tables"][table_key]["gsis"] = {}

        table_config["tables"][table_key]["gsis"][gsi_key] = ordereddict(
            default_options.items() + __parse_options(config_file, current_section, TABLE_CONFIG_OPTIONS).items()
        )

    return ordereddict(global_config.items() + logging_config.items() + table_config.items())