Example #1
def parse_systemdir(systemdir):
    """
    """
    files = set(iter(filter(lambda f: f.endswith('.yaml'), os.listdir(systemdir))))
    systemfiles = list()
    # parse default system file
    if 'system.yaml' in files:
        files.remove('system.yaml')
        path = os.path.join(systemdir, 'system.yaml')
        with open(path, 'r') as f:
            streams = list(yaml.load_all(f))
            if streams:
                for i, doc in enumerate(streams):
                    systemfiles.append(SystemFile(path, i, doc))
            else:
                logger.warning("no systems found in system.yaml")
    else:
        logger.info("no system.yaml system file was found")
    # parse all remaining system files
    for systemfile in files:
        path = os.path.join(systemdir, systemfile)
        with open(path, 'r') as f:
            streams = list(yaml.load_all(f))
            if streams:
                for i, doc in enumerate(streams):
                    systemfiles.append(SystemFile(path, i, doc))
            else:
                logger.warning("no systems found in %s", systemfile)
    return systemfiles
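Example #1 calls yaml.load_all without an explicit Loader, which PyYAML 5.1+ answers with a deprecation warning. A minimal sketch of the modern form, using yaml.safe_load_all on an inline two-document string (the sample data is invented):

import yaml

SAMPLE = """\
---
name: alpha
---
name: beta
"""

# safe_load_all parses every document in the stream with SafeLoader,
# which refuses to construct arbitrary Python objects.
for doc in yaml.safe_load_all(SAMPLE):
    print(doc)  # {'name': 'alpha'}, then {'name': 'beta'}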
Example #2
    def __init__(self, args):
        # Load ballot records from yaml file
        self.input = args[0]
        self.precs = 0
        try:
            stream = open(self.input + '.yaml', 'r')
        except IOError:
            print('Unable to open ' + self.input + '\n')
            exit(0)
        else:
            a = audit_header.AuditHeader()
            a.load_from_file(stream)
            self.b = list(yaml.load_all(stream))

        # If a jurisdiction_slate filename was given, then load its
        #  contents
        if len(args) == 2:
            try:
                stream = open(args[1] + '.yaml', 'r')
            except IOError:
                print('Unable to open ' + args[1] + '\n')
                exit(0)
            else:
                a = audit_header.AuditHeader()
                a.load_from_file(stream)
                self.j = yaml.load_all(stream)
        else:
            self.j = False

        # Add the vote counts of candidates with the same ID# using
        #  sumation(). Write the vote totals for each candidate to the
        #  report stream.
        self.serialize_csv(self.sumation())
Example #3
    def __init__(self, fn=None, content=None):
        IDocumentFormatter.__init__(self)
        if fn is not None:
            with file(fn, "r") as f:
                # materialize: load_all is lazy and the file closes with this block
                self.__content = list(load_all(f, Loader=Loader))
        else:
            self.__content = load_all(content, Loader=Loader)
Example #4
    def validate(self, **kwargs):
        """
        Validates a submission file

        :param file_path: path to file to be loaded.
        :param data: pre loaded YAML object (optional).
        :return: Bool to indicate the validity of the file.
        """
        try:
            submission_file_schema = json.load(open(self.default_schema_file, 'r'))

            additional_file_section_schema = json.load(open(self.additional_info_schema, 'r'))

            # even though we are using the yaml package to load,
            # it supports JSON and YAML
            data = kwargs.pop("data", None)
            file_path = kwargs.pop("file_path", None)

            if file_path is None:
                raise LookupError("file_path argument must be supplied")

            if data is None:
                try:
                    # We try to load using the CLoader for speed improvements.
                    data = yaml.load_all(open(file_path, 'r'), Loader=yaml.CLoader)
                except Exception as e: #pragma: no cover
                    data = yaml.load_all(open(file_path, 'r')) #pragma: no cover

            for data_item in data:
                if data_item is None:
                    continue
                try:
                    if 'comment' in data_item:
                        validate(data_item, additional_file_section_schema)
                    else:
                        validate(data_item, submission_file_schema)

                except ValidationError as ve:
                    self.add_validation_message(
                            ValidationMessage(file=file_path,
                                                message=ve.message + ' in ' + str(ve.instance)))

            if self.has_errors(file_path):
                return False
            else:
                return True

        except ScannerError as se:  # pragma: no cover
            self.add_validation_message(  # pragma: no cover
                ValidationMessage(file=file_path, message=
                    'There was a problem parsing the file.  '
                    'This can be because you forgot spaces '
                    'after colons in your YAML file for instance.  '
                    'Diagnostic information follows.\n' + str(se)))
            return False
Example #5
    def __init__(self, election, record1, record2,
                 merge_output):
        self.rstream = open(''.join([merge_output,'.log']), 'w')
        # Load ballot records from yaml file
        try:
            stream = open(''.join([record1,'.yml']), 'r')
        except IOError:
            self.rstream.write(''.join(['Unable to open ',record1,'\n']))
            print(''.join(['Unable to open ',record1,'\n']))
            raise
        else:
            a = audit_header.AuditHeader()
            a.load_from_file(stream)
            guid1 = a.file_id
            prov1 = a.provenance
            self.b1 = list(yaml.load_all(stream))
            stream.close()
        try:
            stream = open(''.join([record2,'.yml']), 'r')
        except IOError:
            self.rstream.write(''.join(['Unable to open ',record2,'\n']))
            print(''.join(['Unable to open ',record2,'\n']))
            raise
        else:
            a = audit_header.AuditHeader()
            a.load_from_file(stream)
            guid2 = a.file_id
            prov2 = a.provenance
            self.b2 = list(yaml.load_all(stream))
            stream.close()

        # Get the election specs from file
        try:
            stream = open(''.join([election,'.yml']), 'r')
        except IOError:
            self.rstream.write(''.join(['Unable to open ',election,'\n']))
            print(''.join(['Unable to open ',election,'\n']))
            raise
        else:
            a = audit_header.AuditHeader()
            a.load_from_file(stream)
            self.templ_type = a.type
            self.e = yaml.load(stream)
            stream.close()
            if 'precinct_list' in self.e:
                del self.e['precinct_list']

        # Combine provenances and guids from input files
        self.new_prov = []
        self.new_prov.extend(prov1)
        self.new_prov.extend(prov2)
        self.new_prov.append(guid1)
        self.new_prov.append(guid2)
Example #6
    def test_generating_simple_init(self, mock_conf):
        mock_conf.yml_dir_path = RC_PATH
        yml_path = _render_yml(self.init_variables, "generic-init.yml.j2")
        with open(yml_path, 'r') as stream:
            generated_files = yaml.load_all(stream.read())
        with open("./fake_templates/simple-init.yml", 'r') as stream:
            expected_files = yaml.load_all(stream.read())
        try:
            # compare pod
            self.assertEqual(expected_files.next(), generated_files.next())

        except StopIteration:
            self.fail("Incorrect type of generated yml")
Example #7
    def test_generating_without_ports(self, mock_conf):
        self.control_variables["ports"] = ['']
        mock_conf.yml_dir_path = RC_PATH
        yml_path = _render_yml(self.control_variables)
        with open(yml_path, 'r') as stream:
            generated_files = yaml.load_all(stream.read())
        with open("./fake_templates/no_ports.yml", 'r') as stream:
            expected_files = yaml.load_all(stream.read())
        try:
            # compare ReplicationController
            self.assertEqual(expected_files.next(), generated_files.next())

        except StopIteration:
            self.fail("Incorrect type of generated yml")
Example #8
File: freecad.py Project: jreinhardt/BOLTS
	def __init__(self,repo):
		DataBase.__init__(self,"freecad",repo)
		self.bases = []

		self.base_classes = Links()
		self.collection_bases = Links()

		if not exists(self.backend_root):
			e = MalformedRepositoryError("freecad directory does not exist")
			e.set_repo_path(repo.path)
			raise e

		for coll in listdir(self.backend_root):
			basefilename = join(self.backend_root,coll,"%s.base" % coll)
			if not exists(basefilename):
				#skip directory that is no collection
				continue
			try:
				base_info =  list(yaml.load_all(open(basefilename,"r","utf8"), Loader=yaml.FullLoader))
				# FullLoader is not implemented in pyyaml < 5.1
			except AttributeError:
				# this is deprecated for newer pyyaml versions
				base_info =  list(yaml.load_all(open(basefilename,"r","utf8")))
			if len(base_info) != 1:
				raise MalformedCollectionError(
						"Not exactly one YAML document found in file %s" % basefilename)
			base_info = base_info[0]
			for basefile in base_info:
				if basefile["type"] == "function":
					basepath = join(self.backend_root,coll,basefile["filename"])
					if not exists(basepath):
						raise MalformedBaseError("Python module %s does not exist" % basepath)
					for func in basefile["functions"]:
						try:
							function = BaseFunction(func,basefile,coll,self.backend_root)
							self.bases.append(function)
							self.collection_bases.add_link(repo.collections[coll],function)
							for id in func["classids"]:
								if not id in repo.classes:
									raise MalformedBaseError(
										"Unknown class %s" % id)
								if self.base_classes.contains_dst(repo.classes[id]):
									raise NonUniqueBaseError(id)
								self.base_classes.add_link(function,repo.classes[id])
						except ParsingError as e:
							e.set_base(basefile["filename"])
							e.set_collection(coll)
							raise e
				else:
					raise MalformedBaseError("Unknown base type %s" % basefile["type"])
Example #9
def test_constructor(data_filename, canonical_filename, verbose=False):
    _make_loader()
    _make_canonical_loader()
    native1 = None
    native2 = None
    try:
        native1 = list(yaml.load_all(open(data_filename, 'rb'), Loader=MyLoader))
        native2 = list(yaml.load_all(open(canonical_filename, 'rb'), Loader=MyCanonicalLoader))
        assert native1 == native2, (native1, native2)
    finally:
        if verbose:
            print("NATIVE1:")
            pprint.pprint(native1)
            print("NATIVE2:")
            pprint.pprint(native2)
Example #10
    def test_generating_with_emprtDir_path(self, mock_conf):
        self.control_variables["host_path"] = None
        self.control_variables["container_path"] = "/foo/bar/container"
        mock_conf.yml_dir_path = RC_PATH
        yml_path = _render_yml(self.control_variables)
        with open(yml_path, 'r') as stream:
            generated_files = yaml.load_all(stream.read())
        with open("./fake_templates/emptyDir.yml", 'r') as stream:
            expected_files = yaml.load_all(stream.read())
        try:
            # compare ReplicationController
            self.assertEqual(expected_files.next(), generated_files.next())

        except StopIteration:
            self.fail("Incorrect type of generated yml")
Example #11
File: yaml_ordered.py Project: jowolf/tlg
def ordered_load_all (stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
    class OrderedLoader(Loader):
        pass
    def construct_mapping(loader, node):
        loader.flatten_mapping(node)
        #print 'NODE #############', node.start_mark.line, node.start_mark.column, node.value [0][0]
        #if isinstance (node.value [0][0].value, (str, unicode)):
        #  lines [node.value[0][0].value] = node.start_mark.line

        def add_line_map (node):
          if hasattr (node, 'value'):
            if isinstance (node.value, (str, unicode)):
              lines [node.value] = node.start_mark.line
            else:
              #print type (node.value)  # all 'list'
              for subnode in node.value:
                #print subnode [0]
                add_line_map (subnode [0])

        add_line_map (node)

        # nope, these don't work, as the nodes are primitive types by the time they're loaded:
        #node._line = node.start_mark.line
        #node._column = node.start_mark.column

        return object_pairs_hook(loader.construct_pairs(node))
    OrderedLoader.add_constructor(
        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
        construct_mapping)
    return yaml.load_all(stream, OrderedLoader)
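A quick usage sketch for ordered_load_all above. The sample YAML is invented, and we assume a module-level lines dict next to the helper (construct_mapping writes line numbers into it) plus Python 2, which its unicode check implies:

from collections import OrderedDict

lines = {}  # filled by construct_mapping with value -> line number

SAMPLE = """\
---
first: 1
second: 2
---
third: 3
"""

for doc in ordered_load_all(SAMPLE):
    assert isinstance(doc, OrderedDict)  # key order follows the file
    print(doc)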
Example #12
    def read_data(self, datafile):
        with open(datafile, "rt") as f:
            parsed = yaml.load_all(f)
            layout = dict2obj(parsed.next())
            meta = dict2obj(parsed.next())
            content = dict2obj(parsed.next())

        if layout.section != 'layout':
            exit('layout document in "' + datafile + '" is malformed.')
        elif meta.section != 'meta':
            exit('meta document in "' + datafile + '" is malformed.')
        elif content.section != 'content':
            exit('content document in "' + datafile + '" is malformed.')

        rows = { 'rows': [] }

        if layout.header:
            header = []
            for cell in layout.header:
                header.append([eval(cell)])
        else:
            header = None

        for rownum in layout.rows:
            parsed_cell = []
            for cell in rownum.items()[0][1]:
                parsed_cell.append(eval(cell))
            rows['rows'].append( dict(zip(rownum.keys(), [parsed_cell])) )

        # return header, rows
        self.header = header
        self.rows = rows
Example #13
File: db.py Project: gldnspud/fosmc
def load_djs(path, db):
    with open(os.path.join(path, "djs.yaml")) as f:
        for dj in yaml.load_all(f):
            _populate_slug_and_name(dj)
            _store_replacement(db["dj"], dj)
            db["dj"][dj["slug"]] = dj
            if "city" in dj:
                city = dj["city"]
                cityslug = slugify(city)
                if cityslug not in db["city"]:
                    db["city"][cityslug] = dict(name=city, slug=cityslug, lint_created_by_dj=dj["slug"])
                dj["city"] = cityslug
                city_djs = db["city"][cityslug].setdefault("djs", [])
                city_djs.append(dj["slug"])
            # Normalize 'genre' to 'genres'.
            genres = dj.setdefault("genres", [])
            if "genre" in dj:
                genres.append(dj.pop("genre"))
            genreslugs = []
            for genre in genres:
                genreslug = slugify(genre)
                if genreslug not in db["genre"]:
                    db["genre"][genreslug] = dict(name=genre, slug=genreslug, lint_created_by_dj=dj["slug"])
                genreslugs.append(genreslug)
                genredjs = db["genre"][genreslug].setdefault("djs", [])
                genredjs.append(dj["slug"])
            dj["genres"] = genreslugs
Example #14
def read_yaml(yaml_filename):
    docs=[]
    with open(yaml_filename) as f:
        docs=yaml.load_all(f)
        docs=list(docs)
#        docs=yaml.dump_all(ya, encoding='utf8', allow_unicode=True)
    return docs
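read_yaml converts the generator to a list before the with block exits; the order matters because yaml.load_all is lazy. A small sketch of the failure mode it avoids (file name is hypothetical):

import yaml

with open("docs.yaml") as f:
    lazy_docs = yaml.safe_load_all(f)
# list(lazy_docs) here would raise "I/O operation on closed file":
# load_all only reads the stream as documents are requested.

with open("docs.yaml") as f:
    docs = list(yaml.safe_load_all(f))  # parsed fully while f is still open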
Example #15
    def update_all(self, form, force_rollback=False):        
        form = self._quote_element_names(form)
        form_dict_list = yaml.load_all(form)

        self.session.begin()
            
        is_modified = False
        for form_dict in form_dict_list:
            element_name = form_dict['element_name']
            ename = ObjectSpec.parse(element_name, expected=ElementName)
                        
            # update_all() does not look in the instance_cache, must exist in DB
            instance = self.session.resolve_element_spec(ename)
            self._update_instance(instance, form_dict)
            is_modified = is_modified or self.session.is_modified(instance)
        
        desc = self.session.create_change_description()

        if force_rollback:
            self.log.info("Forced Rollback")
            self.session.rollback()
                    
        elif is_modified:
            self.log.fine("Instance Modified: Commit")                                            
            self.session.commit()
            
        else:
            self.log.fine("Instance Unchanged: Rollback")                                            
            self.session.rollback()
        
        return desc
Example #16
def construct_case(filename, name):
    """
    Parse a definition of a test case from a yaml file and construct the
    TestCase subclass dynamically.
    """
    def make_test(test_name, definition, i):
        def m(self):
            if name in SKIP_TESTS.get(self.es_version, ()):
                raise SkipTest()
            self.run_code(definition)
        m.__doc__ = '%s:%s.test_from_yaml_%d (%s): %s' % (
            __name__, name, i, '/'.join(filename.split('/')[-2:]), test_name)
        m.__name__ = 'test_from_yaml_%d' % i
        return m

    with open(filename) as f:
        tests = list(yaml.load_all(f))

    attrs = {
        '_yaml_file': filename
    }
    i = 0
    for test in tests:
        for test_name, definition in test.items():
            if test_name == 'setup':
                attrs['_setup_code'] = definition
                continue

            attrs['test_from_yaml_%d' % i] = make_test(test_name, definition, i)
            i += 1

    return type(name, (YamlTestCase, ), attrs)
Example #17
def load_packages_multi(resources):
    root = os.path.dirname(resources)

    pkgs = list(yaml.load_all(open(resources)))[0]
    use = []
    for p in pkgs:
        
        p['id_package'] = basename(p['dir']) 
        if not exists(join(root, p['dir'], 'setup.py')):
            continue
        src = join(root, p['dir'], 'src')
        if not exists(src):
            #print('unknown %r' % src)
            continue
        roots = os.listdir(src)
        
        for r in roots:
            if 'egg-info' in r:
                continue
            if not isdir(join(src,r)):
                continue
            a = dict()
            a['src'] = join(src,r)
            a['repo'] = basename(p['dir'])
            a['id_package'] = r
            use.append(a)
    return use
Example #18
def parse_post(f):
    parsed = yaml.load_all(f)
    post_header = next(parsed)
    if 'postready' in post_header and (not post_header['postready']):
        print("Post not ready")
        return None
    f.seek(0)
    l = f.readline()
    while (l != '...\n') and (l != ''):
        l = f.readline()
    if l == '':
        return None
    comment = f.read()
    post_header['comment'] = comment
    imgs = []
    if 'image1' in post_header and post_header['image1']:
        with open(post_header['image1'], 'rb') as f:
            imgs += [f.read()]
    if 'image2' in post_header and post_header['image2']:
        with open(post_header['image2'], 'rb') as f:
            imgs += [f.read()]
    if 'image3' in post_header and post_header['image3']:
        with open(post_header['image3'], 'rb') as f:
            imgs += [f.read()]
    if 'image4' in post_header and post_header['image4']:
        with open(post_header['image4'], 'rb') as f:
            imgs += [f.read()]
    post_header['imgs'] = imgs
    return post_header
Example #19
File: results.py Project: pwaller/pwa
def kick(self, params):
    """
    Dump a list of datasets which need to be kicked
    """
    
    if not params.dataset:
        print "Please specify --dataset"
        raise SystemExit
    
    match = re.compile("^.*?-P(.+)-R(\d+).root$").match
    matches = ((f, match(f)) for f in listdir("."))
    files = [(name, f.groups()) for name, f in matches if f]
    files = sorted([(name, int(run), period.split("_")[-1]) for name, (period, run) in files])
    
    _, data_info = load_all(open(params.dataset))
    by_period, by_run = {}, {}
    for d in data_info["datasets"]:
        by_period.setdefault(d.period, []).append(d)
        by_period.setdefault(d.period[0], []).append(d)
        assert not d.run in by_run
        by_run[d.run] = d
           
        #by_run.setdefault(d.run, []).append(d)
        
    if not exists("kicked"):
        makedirs("kicked")
    
    for name, run, period in files:
        if period != by_run[run].period or period == "UNK":
            print name, period, by_run[run].period
            rename(name, pjoin("kicked", name))
Example #20
def load_packages(resources):
    root = os.path.dirname(resources)

    pkgs = list(yaml.load_all(open(resources)))
    use = []
    for p in pkgs:
        p['id_package'] = basename(p['dir']) 
        if not exists(join(root, p['dir'], 'setup.py')):
            continue
        src = join(root, p['dir'], 'src')
        if not exists(src):
            #print('unknown %r' % src)
            continue
        roots = os.listdir(src)
        this_pkgs = []
        for r in roots:
            if 'egg-info' in r:
                continue
            if not isdir(join(src,r)):
                continue
            this_pkgs.append(r)
        p['src'] = src
        p['packages'] = this_pkgs
        use.append(p)
    return use
Example #21
    def take_action(self, parsed_args):
        self.log.debug('take_action(%s)' % parsed_args)
        # set default max-width
        if parsed_args.max_width == 0:
            parsed_args.max_width = 80
        client = self.app.client_manager.congressclient
        with open(parsed_args.policy_file_path, "r") as stream:
            policies = yaml.load_all(stream)
            try:
                body = next(policies)
            except StopIteration:
                raise Exception('No policy found in file.')
            try:
                body = next(policies)
                raise Exception(
                    'More than one policy found in file. None imported.')
            except StopIteration:
                pass
        data = client.create_policy(body)

        def rule_dict_to_string(rules):
            rule_str_list = [rule['rule'] for rule in rules]
            return "\n".join(rule_str_list)

        data['rules'] = rule_dict_to_string(data['rules'])
        return zip(*sorted(six.iteritems(data)))
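The two next() calls in Example #21 implement an "exactly one document" check. The same pattern extracted into a standalone helper (our naming, not congressclient's):

import yaml

def load_single_document(stream):
    """Return the only YAML document in stream; raise otherwise."""
    docs = yaml.safe_load_all(stream)
    try:
        first = next(docs)
    except StopIteration:
        raise ValueError('No document found in stream.')
    try:
        next(docs)
    except StopIteration:
        return first
    raise ValueError('More than one document found in stream.')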
Example #22
def test_constructor_types(data_filename, code_filename, verbose=False):
    _make_objects()
    native1 = None
    native2 = None
    try:
        native1 = list(yaml.load_all(open(data_filename, 'rb'), Loader=MyLoader))
        if len(native1) == 1:
            native1 = native1[0]
        native2 = _load_code(open(code_filename, 'rb').read())
        try:
            if native1 == native2:
                return
        except TypeError:
            pass
        if verbose:
            print("SERIALIZED NATIVE1:")
            print(_serialize_value(native1))
            print("SERIALIZED NATIVE2:")
            print(_serialize_value(native2))
        assert _serialize_value(native1) == _serialize_value(native2), (native1, native2)
    finally:
        if verbose:
            print("NATIVE1:")
            pprint.pprint(native1)
            print("NATIVE2:")
            pprint.pprint(native2)
Example #23
    def _exec_module(self, module):
        fullname = module.__name__

        if yaml is None:
            raise ImportError('PyYaml is not installed')

        class MyYamlLoader(YamlLoader):
            pass

        for tag, func in iteritems(self._defaults):
            @functools.wraps(func)
            def constructor(loader, node, func=func):
                # bind func at definition time; Python closures are late-binding,
                # so without the default every tag would call the loop's last func
                return func(fullname, loader.construct_mapping(node))

            yaml.add_constructor(tag, constructor, Loader=MyYamlLoader)

        try:
            stream = file(self.get_filename(fullname), 'r')
            docs = yaml.load_all(stream, Loader=MyYamlLoader)

        except IOError:
            raise ImportError("IO error while reading a stream")
        except yaml.YAMLError as e:
            raise e  # XXX convert into SyntaxError

        else:
            for doc in docs:
                if not hasattr(doc, '__name__'):
                    continue
                setattr(module, doc.__name__, doc)
Example #24
def decomposeRedux(osTemplateFilename):
    output = []
    with open(osTemplateFilename, 'r') as f:
        for data in yaml.load_all(f, Loader=yaml.Loader):
            objVals = data['objects']
            for obj in objVals:
                filename = 'kube-' + data['metadata']['name']
                fileType = None
                outValue = {}
                for objkey, objval in obj.items():
                    if objkey == 'kind' and objval == 'Service':
                        fileType = 'svc'
                        outValue[objkey] = objval
                    elif objkey == 'kind' and objval == 'ReplicationController':
                        fileType = 'rc'
                        outValue[objkey] = objval
                    elif objkey == 'kind' and objval == 'Pod':
                        fileType = 'po'
                        outValue[objkey] = objval
                    elif objkey == 'id':
                        filename = filename + '-' + objval
                    else:
                        outValue[objkey] = objval
                filename = filename + '-' + fileType + '.yaml'
                with open(filename, 'w') as outfile:
                    outfile.write(
                        yaml.dump(outValue, default_flow_style=False))
                    output.append(filename)
    return output
Example #25
File: theme.py Project: nunb/MaTiSSe
  def get(self, source, name='theme', div_id=''):
    """
    Get theme from source stream.

    Parameters
    ----------
    source: str
    name: str
    div_id: str
    """
    self.div_id = div_id
    if len(source) > 0:
      try:
        for data in load_all(source):
          if name in data:
            for element in data[name]:
              self.__get_copy_from(data=element)
              self.__get_canvas(data=element)
              self.__get_ordered_list(data=element)
              self.__get_unordered_list(data=element)
              self.__get_ordered_list_items(data=element)
              self.__get_unordered_list_items(data=element)
              self.__get_toc(data=element)
              self.__get_slide(data=element)
              self.__get_box_like(env='box', data=element)
              self.__get_box_like(env='note', data=element)
              self.__get_box_like(env='table', data=element)
              self.__get_box_like(env='figure', data=element)
              self.__get_box_like(env='video', data=element)
        self.custom = True
      except YAMLError:
        print('No valid definition of theme has been found')
    self.__get_css()
Example #26
    def _show_log(self, request):
        log_data = "Log not found"
        MIN_LEVEL = LEVEL_IMPORTANT
        try:
            MIN_LEVEL = int(request.args.get(b"level", [MIN_LEVEL])[0])
        except ValueError as e:
            logger.warn("Got invalid log 'level' in request arguments: "
                        "{level}", level=request.args[b"level"])
        filename = None
        date = bytes_to_str(request.args.get(b"date", [b"current"])[0])
        if date == datetime.today().strftime("%Y-%m-%d"):
            filename = "{}.yaml".format(self.channel)
        elif date_regex.match(date):
            filename = "{}.{}.yaml".format(self.channel, date)
        elif date == "current":
            filename = "{}.yaml".format(self.channel)
            date = datetime.today().strftime("%Y-%m-%d")
        if filename and os.path.isfile(os.path.join(self.log_dir, filename)):
            with open(os.path.join(self.log_dir, filename)) as logfile:
                log_data = '<table>'
                for i, data in enumerate(yaml.load_all(logfile)):
                    if data["levelno"] > MIN_LEVEL:
                        _prepare_yaml_element(data)
                        log_data += line_templates[data["levelname"]].format(
                            index=i, **data)
                log_data += '</table>'
        request.write(str_to_bytes(log_page_template.format(
            log_data=log_data, title=self.title, header=header,
            footer=footer, channel=self.channel_link(), date=date,
            Level=MIN_LEVEL)))
        request.finish()
Example #27
def main(argv=None):

	parser = OptionParser(description='fabtests yaml parsing utility')
	parser.add_option('-d', action='store_true', default=False, help=difference.__doc__)
	parser.add_option('-v', action='store_true', default=False, help=pretty.__doc__)
	(options, args) = parser.parse_args()

	if len(args) == 0:
		fd = sys.stdin
	elif len(args) > 1:
		class fd:
			@staticmethod
			def read():
				r1 = list(map(open, args))
				r2 = [x.read() for x in r1]
				return reduce(operator.add, r2)
	else:
		fd = open(args[0], 'r')

	yi = list(yaml.load_all(fd.read()))  # materialize: -d and -v may both consume it

	if options.d:
		perfprint(difference(yi))

	if options.v:
		pretty(yi)

	return 0
Example #28
def prepare_reporting(mongo_addr, dbname, config_path):
    from citizendesk.common.utils import get_logger
    from citizendesk.common.dbc import mongo_dbs
    from citizendesk.feeds.config import set_config
    import citizendesk.outgest.dispatch as dispatch

    logger = get_logger()

    mongo_dbs.set_dbname(dbname)
    DbHolder = namedtuple('DbHolder', 'db')
    mongo_dbs.set_db(DbHolder(db=MongoClient(mongo_addr[0], mongo_addr[1])[mongo_dbs.get_dbname()]))

    config_data = None

    if config_path:
        try:
            cfghf = open(config_path)
            config_data = cfghf.read()
            cfghf.close()
            config_yaml = yaml.load_all(config_data)
            config_data = config_yaml.next()
            config_yaml.close()
        except:
            config_data = None
            logger.error('can not read config file: ' + str(config_path))
            return False

    dispatch.setup_blueprints(app, config_data)

    return True
Example #29
  def _ImportBackupResources(self, file_name):
    f = file(os.path.dirname(__file__) + file_name, 'r')
    for res in yaml.load_all(f):
      try:
        author_key = models.Author.get_by_key_name(res['author_id'])
        author_key2 = None
        if 'author_id2' in res:
          author_key2 = models.Author.get_by_key_name(res['author_id2'])

        resource = models.Resource(
          title=res['title'],
          subtitle=res.get('subtitle') or None,
          description=res.get('description') or None,
          author=author_key,
          second_author=author_key2,
          url=unicode(res['url']),
          social_url=unicode(res.get('social_url') or ''),
          browser_support=res.get('browser_support') or [],
          update_date=res.get('update_date'),
          publication_date=res['publication_date'],
          tags=res['tags'],
          draft=False # These are previous resources. Mark them as published.
          )
        resource.put()

      except TypeError:
        pass # TODO(ericbidelman): Not sure why this is throwing an error, but
             # ignore it, whatever it is.
    f.close()
Example #30
File: form.py Project: sundari/Reporter
    def construct_panes(self, fields_file):
        """Read the fields file and use the data to construct the
        collapsible panes"""
        fields_data = yaml.load_all(open(fields_file))
        for pane_data in fields_data:
            self.panes.append(Pane(self, pane_data))
        self.Layout()
Example #31
    def validate(self, **kwargs):
        """
        Validates a submission file.

        :param file_path: path to file to be loaded.
        :param data: pre loaded YAML object (optional).
        :return: Bool to indicate the validity of the file.
        """
        data_file_handle = None
        return_value = False

        try:
            submission_file_schema = None
            additional_file_section_schema = None

            with file_opener(self.default_schema_file,
                             'r') as submission_schema:
                submission_file_schema = json.load(submission_schema)

            with file_opener(self.additional_info_schema,
                             'r') as additional_schema:
                additional_file_section_schema = json.load(additional_schema)

            # even though we are using the yaml package to load,
            # it supports JSON and YAML
            data = kwargs.pop("data", None)
            file_path = kwargs.pop("file_path", None)

            if file_path is None:
                raise LookupError("file_path argument must be supplied")

            if data is None:
                data_file_handle = file_opener(file_path, 'r')
                data = yaml.load_all(data_file_handle, Loader=Loader)

            for data_item_index, data_item in enumerate(data):
                if data_item is None:
                    continue
                try:
                    if not data_item_index and 'data_file' not in data_item:
                        validate(data_item, additional_file_section_schema)
                    else:
                        validate(data_item, submission_file_schema)

                except ValidationError as ve:
                    self.add_validation_message(
                        ValidationMessage(file=file_path,
                                          message=ve.message + ' in ' +
                                          str(ve.instance)))

            if not self.has_errors(file_path):
                return_value = True

        except LookupError as le:
            raise le

        except ScannerError as se:  # pragma: no cover
            self.add_validation_message(  # pragma: no cover
                ValidationMessage(
                    file=file_path,
                    message='There was a problem parsing the file.  '
                    'This can be because you forgot spaces '
                    'after colons in your YAML file for instance.  '
                    'Diagnostic information follows.\n' + str(se)))
            return_value = False

        except Exception as e:
            self.add_validation_message(
                ValidationMessage(file=file_path, message=e.__str__()))
            return_value = False

        finally:
            if data_file_handle:
                data_file_handle.close()

        return return_value
Example #32
    def generate_configuration(cls,
                               cluster_spec,
                               cvmfs=False,
                               cephfs=False,
                               cephfs_volume_size=None,
                               debug=False):
        """Generate Kubernetes manifest files used to init REANA cluster.

        :param cluster_spec: Dictionary representing complete REANA
            cluster spec file.
        :param cephfs: Boolean which represents whether REANA is
            deployed with CEPH or not.
        :param cephfs_volume_size: Int to set CEPH volume size in GB.
        :param cvmfs: Boolean which represents whether REANA is
            deployed with CVMFS or not.
        :param debug: Boolean which represents whether REANA is
            deployed in debug mode or not.

        :return: A generator/iterable of generated Kubernetes YAML manifests
            as Python objects.
        """
        # Setup an Environment for Jinja
        env = Environment(loader=FileSystemLoader(
            cls._conf['templates_folder']),
                          keep_trailing_newline=False)

        # Define where are backend conf params needed when rendering templates.
        be_conf_params_fp = cls._conf['templates_folder'] + '/config.yaml'

        try:
            with open(be_conf_params_fp) as f:

                # Load backend conf params
                backend_conf_parameters = yaml.load(f.read())
                # change type of deployment (cephfs|cvmfs|hostpath)
                if cephfs or cluster_spec['cluster'].get('cephfs'):
                    backend_conf_parameters['CEPHFS'] = True
                    if not cephfs_volume_size:
                        cephfs_volume_size = \
                            cluster_spec['cluster'].get(
                                'cephfs_volume_size',
                                200)

                if debug or cluster_spec['cluster'].get('debug'):
                    backend_conf_parameters['DEBUG'] = True

                if cluster_spec['cluster'].get('cephfs_monitors'):
                    backend_conf_parameters['CEPHFS_MONITORS'] = \
                        cluster_spec['cluster'].get('cephfs_monitors')

                if cluster_spec['cluster'].get('root_path'):
                    backend_conf_parameters['ROOT_PATH'] = \
                        cluster_spec['cluster'].get('root_path')

                if cluster_spec['cluster'].get('shared_volume_path'):
                    backend_conf_parameters['SHARED_VOLUME_PATH'] = \
                        cluster_spec['cluster'].get('shared_volume_path')

                if cluster_spec['cluster'].get('db_persistence_path'):
                    backend_conf_parameters['DB_PERSISTENCE_PATH'] = \
                        cluster_spec['cluster'].get('db_persistence_path')

                # Would it be better to combine templates or populated
                # templates in Python code for improved extensibility?
                # Just drop a .yaml template and add necessary to config.yaml
                # without changing anything?

                # Load template combining all other templates from
                # templates folder
                template = env.get_template('backend_conf.yaml')

                components = cluster_spec['components']
                rs_img = components['reana-server']['image']
                rjc_img = components['reana-job-controller']['image']
                rwfc_img = components['reana-workflow-controller']['image']
                rwm_img = components['reana-workflow-monitor']['image']
                rmb_img = components['reana-message-broker']['image']

                rs_environment = components['reana-server']\
                    .get('environment', [])
                rjc_environment = components['reana-job-controller'] \
                    .get('environment', [])
                rwfc_environment = components['reana-workflow-controller'] \
                    .get('environment', [])
                rwm_environment = components['reana-workflow-monitor'] \
                    .get('environment', [])
                rmb_environment = components['reana-message-broker'] \
                    .get('environment', [])

                rs_mountpoints = components['reana-server']\
                    .get('mountpoints', [])
                rjc_mountpoints = components['reana-job-controller']\
                    .get('mountpoints', [])
                rwfc_mountpoints = components['reana-workflow-controller']\
                    .get('mountpoints', [])
                rwm_mountpoints = components['reana-workflow-monitor'] \
                    .get('mountpoints', [])
                rmb_mountpoints = components['reana-message-broker'] \
                    .get('mountpoints', [])

                # Render the template using given backend config parameters
                cluster_conf = template.\
                    render(backend_conf_parameters,
                           REANA_URL=cluster_spec['cluster'].get(
                               'reana_url',
                               'reana.cern.ch'),
                           CEPHFS_VOLUME_SIZE=cephfs_volume_size or 1,
                           SERVER_IMAGE=rs_img,
                           JOB_CONTROLLER_IMAGE=rjc_img,
                           WORKFLOW_CONTROLLER_IMAGE=rwfc_img,
                           WORKFLOW_MONITOR_IMAGE=rwm_img,
                           MESSAGE_BROKER_IMAGE=rmb_img,
                           RS_MOUNTPOINTS=rs_mountpoints,
                           RJC_MOUNTPOINTS=rjc_mountpoints,
                           RWFC_MOUNTPOINTS=rwfc_mountpoints,
                           RWM_MOUNTPOINTS=rwm_mountpoints,
                           RMB_MOUNTPOINTS=rmb_mountpoints,
                           RS_ENVIRONMENT=rs_environment,
                           RJC_ENVIRONMENT=rjc_environment,
                           RWFC_ENVIRONMENT=rwfc_environment,
                           RWM_ENVIRONMENT=rwm_environment,
                           RMB_ENVIRONMENT=rmb_environment,
                           )
                # Strip empty lines for improved readability
                cluster_conf = '\n'.join([
                    line for line in cluster_conf.splitlines() if line.strip()
                ])
                # Materialize the generator so the log shows the parsed
                # documents rather than the generator's memory address
                logging.debug('Loaded K8S config successfully: \n {}'.format(
                    list(yaml.load_all(cluster_conf))))

        except TemplateNotFound as e:
            logging.info(
                'Something wrong when fetching K8S config file templates from '
                '{filepath} : \n'
                '{error}'.format(filepath=cls._conf['templates_folder'],
                                 error=e.strerror))
            raise e
        except TemplateSyntaxError as e:
            logging.info('Something went wrong when parsing K8S template from '
                         '{filepath} : \n'
                         '{error}'.format(filepath=e.filename,
                                          error=e.strerror))
            raise e
        except IOError as e:
            logging.info(
                'Something wrong when reading K8S config parameters-file from '
                '{filepath} : \n'
                '{error}'.format(filepath=be_conf_params_fp, error=e.strerror))
            raise e

        # As Jinja rendered string is basically multiple YAML documents in one
        # string parse it with YAML-library and return a generator containing
        # independent YAML documents (split from `---`) as Python objects.
        return yaml.load_all(cluster_conf)
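As the closing comment says, the rendered Jinja string is just multiple YAML documents separated by ---, and load_all splits them back apart. A toy illustration of that round trip (the template contents are invented, not REANA's):

import yaml
from jinja2 import Template

template = Template("""\
kind: Deployment
name: {{ name }}
---
kind: Service
name: {{ name }}
""")

rendered = template.render(name="reana-server")
for manifest in yaml.safe_load_all(rendered):
    print(manifest["kind"], manifest["name"])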
Example #33
def load_all(stream, additional_vars={}):
    for data in yaml.load_all(stream, loader.Loader):
        yield env.interpolate(data, additional_vars)
Example #34
    def parse(cls, *args):
        filename = args[0]
        if filename is not None and os.path.exists(filename):
            with open(filename) as fp:
                # materialize: load_all is lazy and fp closes with this block
                return list(yaml.load_all(fp))
Example #35
def load_dataset(dataset, queried_year, queried_value):
    pointIndicatorGroup = pygame.sprite.Group()

    if dataset == "aged population":
        stream = urllib.request.urlopen(
            "http://52.187.242.219:5000/data").read()

        # stream = open("../data/output/data.yml")
        xs = []
        ys = []
        indices = []
        total_pops = []
        aged_pops = []
        proportional_pops = []
        for data in yaml.load_all(stream):
            for k in data.keys():
                # import pdb; pdb.set_trace()
                sa2_name = data[k]["sa2_name"]

                index = 0
                elder_population = 0
                total_population = 0
                proportional_pop = 0
                for year in data[k]["by_year"]:
                    if (year['year'] != queried_year):
                        continue
                    index = year["index"]
                    elder_population = year["elder_population"]
                    total_population = year["total_population"]
                    if not total_population == 0:
                        proportional_pop = year["elder_population"] / year[
                            "total_population"]

                # skip if we can't find a lat long
                try:
                    lat, long = name_to_lat_long[sa2_name.rstrip(
                        " (ACT)").upper()]
                except KeyError:
                    # print ("can't find long lat for '" + sa2_name.rstrip(" (ACT)").upper() +"'")
                    continue
                x, y = lat_long_to_x_y(lat, long)
                xs.append(x)
                ys.append(y)

                total_pops.append(total_population)
                aged_pops.append(elder_population)
                proportional_pops.append(proportional_pop)
                indices.append(index)
        # print(len(xs))
        xs, ys = normalise_utm_coords(xs, ys)
        total_pops = normalise(total_pops)
        aged_pops = normalise(aged_pops)
        proportional_pops = normalise(proportional_pops)
        indices = normalise(indices)

        values = {
            "total_pops": total_pops,
            "aged_pops": aged_pops,
            "proportional_pops": proportional_pops,
            "indices": indices
        }

        for i in range(len(xs)):
            x = xs[i]
            y = ys[i]
            value = values[queried_value][i]
            PointIndicator((x, y), value, pointIndicatorGroup)

    if dataset == "test":
        text_to_render = []  # list of (string, (x,y))
        xs = []
        ys = []

        names = []
        for _, name, lat, long in postcode_lat_long_list:
            x, y = lat_long_to_x_y(lat, long)
            xs.append(x)
            ys.append(y)
            names.append(name)

        max_x = max(xs)
        min_x = min(xs)
        max_y = max(ys)
        min_y = min(ys)

        # normalise
        for i in range(len(xs)):
            x = (xs[i] - max_x) / (max_x - min_x)
            y = -(ys[i] - max_y) / (max_y - min_y)
            # print(x, y)
            x *= map_width
            y *= map_height
            x += width
            if (not names[i] in ("TOPLEFT", "BOTTOMRIGHT")):
                pi = PointIndicator((x, y), 1, pointIndicatorGroup)
                text_to_render.append((names[i], (x, y)))

    return pointIndicatorGroup
Example #36
File: check.py Project: chipaca/snappy

if os.environ["SNAPPY_USE_STAGING_STORE"] == "1":
    snap_ids = {
        "test-snapd-tools": "02AHdOomTzby7gTaiLX3M3SGMmXDfLJp",
        "test-snapd-devmode": "FcHyKyMiQh71liP8P82SsyMXtZI5mvVj",
        "test-snapd-python-webserver": "uHjTANBWSXSiYzNOUXZNDnOSH3POSqWS",
    }
else:
    snap_ids = {
        "test-snapd-tools": "eFe8BTR5L5V9F7yHeMAPxkEr2NdUXMtw",
        "test-snapd-devmode": "821MII7GAzoRnPvTEb8R51Z1s9e0XmK5",
        "test-snapd-python-webserver": "Wcs8QL2iRQMjsPYQ4qz4V1uOlElZ1ZOb",
    }

res = list(yaml.load_all(sys.stdin))

equals("number of entries", len(res), 7)

check(
    "basic",
    res[0],
    ("name", equals, "basic"),
    ("summary", equals, "Basic snap"),
    ("path", matches, r"^basic_[0-9.]+_all\.snap$"),
    ("version", matches, verNotesRx),
    ("license", equals, "unset"),
    ("description", equals, "A basic buildable snap\n"),
    ("build-date", exists),
)
Example #37
        json_dump = get_response(api_link.format(curr1, curr2))
        hasher = {}
        return self.named_tuple(type="BTC",
                                price=json_dump["lprice"],
                                curr=json_dump["curr2"])


def send_messages(key, val, conf_obj, producer):
    crypto_tuple = config.get_config(val, key)
    value = json.dumps({
        "price": str(crypto_tuple.price),
        "curr": str(crypto_tuple.curr)
    })
    producer.send(topic=key, key=crypto_tuple.type, value=value)
    print "Sending messages for", key
    return producer


if __name__ == "__main__":

    config = Config()

    while True:
        with open("ap_config.yaml", "r") as stream:
            yaml_obj = yaml.load_all(stream)
            [[
                send_messages(key, val, config, producer)
                for (key, val) in config_obj.items()
            ] for config_obj in yaml_obj]
            sleep(15)
Example #38
    def load_yaml(self, yaml_str):
        """Load the blueprint; if variables are defined, substitute them.
        Note that the variables must be defined as an array."""
        def get_variables_dict(yaml_str):
            """ get the defined variables """

            # Can't read the entire yaml, as the jinja2 markup may make it invalid yaml
            variables_str = ""
            for line in yaml_str.splitlines():
                if line.startswith('variables'):
                    variables_str += line + "\n"
                elif len(variables_str) > 0:
                    variables_str += line + "\n"
                    if len(line.strip()) == 0:
                        break
            if len(variables_str) == 0:
                # no variables defined
                return {}

            blueprint = yaml.load(variables_str, Loader=yaml.Loader)

            var_dict = {}
            if 'variables' in blueprint:
                # re-substitute variables that reference earlier ones
                for var_list in blueprint['variables']:
                    for key, val in var_list.items():
                        if type(val) == type(""):
                            val = val.format(**var_dict)
                        #if type(val)==type({}):
                        #  if len(val)==1:
                        #    val=var_dict[list(val)[0]]

                        var_dict[key] = val
            return var_dict

        def update_yaml(yaml_str, var_dict={}):
            template = jinja2.Template(yaml_str)
            yaml_str = template.render(**var_dict)
            #open('temp.yaml','w').write(yaml_str)
            blueprints = yaml.load_all(yaml_str, Loader=yaml.Loader)
            return list(blueprints)

        var_dict = get_variables_dict(yaml_str)

        blueprints = update_yaml(yaml_str, var_dict)

        #  call import yamls
        imported = {}
        setup_bp = blueprints[0]
        if 'import' in setup_bp:
            for filename in setup_bp['import']:
                with open(filename + '.yaml') as f:
                    imported = list(yaml.load_all(f.read(),
                                                  Loader=yaml.Loader))[0]
                    setup_bp = deep_update(imported, setup_bp)

        #raise_error(list(setup_bp['analysis']))

        self.doc_meta = self.get_doc_meta(setup_bp,
                                          defaults={
                                              'output_type': 'md',
                                              'test': 'test'
                                          })
        self.doc_format = self.doc_meta['output_type']

        blueprints[0] = setup_bp

        return blueprints
Example #39
    def update_yaml(yaml_str, var_dict={}):
        template = jinja2.Template(yaml_str)
        yaml_str = template.render(**var_dict)
        #open('temp.yaml','w').write(yaml_str)
        blueprints = yaml.load_all(yaml_str, Loader=yaml.Loader)
        return list(blueprints)
Example #40
    def load(self, *args):
        with open(self.filename) as fp:
            return [JobBase.unserialize(job) for job in yaml.load_all(fp) if job is not None]
Example #41
import yaml
import io

filename = "dynatrace_conf.yaml"
# open the file
with open(filename, 'r') as f:
  try:
    docs = yaml.load_all(f, Loader=yaml.FullLoader)
    for doc in docs:
      for k, v in doc.items():
        if "name" in k:
          print(k, "->", v)
        if "url" in k:
          print(k, "->", v)
      print("\n")
  except yaml.YAMLError as exc:
    print(exc)
Example #42
import os
import sys
import subprocess as sp
import yaml

cfile = open('probe/config.yaml', 'r')
cconfig = yaml.load_all(cfile)
for datac in cconfig:
    c = datac


def build_wav(source_video):
    wav_out = c['wav_sources'] + os.path.split(source_video)[1] + '.wav'
    try:
        wav_cmd = [
            'ffmpeg', '-y', '-i', source_video, '-vn', '-sn', '-map', '0:a',
            wav_out
        ]
        sp.check_output(wav_cmd, stderr=sp.STDOUT)
    except sp.SubprocessError:
        sys.exit("Error: WAV audio extraction")
    finally:
        return wav_out


def build_ogg(source_wav):
    ogg_out = c['ogg_sources'] + os.path.split(source_wav)[1] + '.ogg'
    try:
        ogg_cmd = ['oggenc', source_wav, '-o', ogg_out]
        sp.check_output(ogg_cmd, stderr=sp.STDOUT)
    except sp.SubprocessError:
Example #43
M_PER_TICK = SPEED_OF_LIGHT / LOCODECK_TS_FREQ

if len(sys.argv) > 1:
    for arg in sys.argv[1:]:
        if arg == 'm':
            unit = 'meters'
        elif arg == 't':
            unit = 'ticks'
        else:
            print("usage: {} [unit]".format(sys.argv[0]))
            print(
                "  unit: t = ticks (default), m = meters (excluding antenna delay)"
            )
            sys.exit(1)

for packet in yaml.load_all(sys.stdin, Loader=yaml.CLoader):
    if not packet:
        continue

    data = {'id': packet['from'], 'tof': {}}

    for remote in packet['remoteAnchorData']:
        if 'distance' in remote:
            tof = remote['distance']
            remote_id = remote['id']
            if unit == 'ticks':
                data['tof'][remote_id] = tof
            if unit == 'meters':
                data['tof'][remote_id] = tof * M_PER_TICK - ANTENNA_OFFSET

    print("---")
Example #44
File: __init__.py Project: VRDate/yq
def yq(input_streams=None,
       output_stream=None,
       input_format="yaml",
       output_format="json",
       program_name="yq",
       width=None,
       indentless_lists=False,
       xml_root=None,
       xml_dtd=False,
       xml_force_list=frozenset(),
       explicit_start=False,
       explicit_end=False,
       jq_args=frozenset(),
       exit_func=None):
    if not input_streams:
        input_streams = [sys.stdin]
    if not output_stream:
        output_stream = sys.stdout
    if not exit_func:
        exit_func = sys.exit
    converting_output = True if output_format != "json" else False

    try:
        # Note: universal_newlines is just a way to induce subprocess to make stdin a text buffer and encode it for us
        jq = subprocess.Popen(
            ["jq"] + list(jq_args),
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE if converting_output else None,
            universal_newlines=True)
    except OSError as e:
        msg = "{}: Error starting jq: {}: {}. Is jq installed and available on PATH?"
        exit_func(msg.format(program_name, type(e).__name__, e))

    try:
        if converting_output:
            # TODO: enable true streaming in this branch (with asyncio, asyncproc, a multi-shot variant of
            # subprocess.Popen._communicate, etc.)
            # See https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
            use_annotations = True if output_format == "annotated_yaml" else False
            input_docs = []
            for input_stream in input_streams:
                if input_format == "yaml":
                    loader = get_loader(use_annotations=use_annotations)
                    input_docs.extend(
                        yaml.load_all(input_stream, Loader=loader))
                elif input_format == "xml":
                    import xmltodict
                    input_docs.append(
                        xmltodict.parse(input_stream.read(),
                                        disable_entities=True,
                                        force_list=xml_force_list))
                elif input_format == "toml":
                    import toml
                    input_docs.append(toml.load(input_stream))
                else:
                    raise Exception("Unknown input format")
            input_payload = "\n".join(
                json.dumps(doc, cls=JSONDateTimeEncoder) for doc in input_docs)
            jq_out, jq_err = jq.communicate(input_payload)
            json_decoder = json.JSONDecoder(object_pairs_hook=OrderedDict)
            if output_format == "yaml" or output_format == "annotated_yaml":
                yaml.dump_all(decode_docs(jq_out, json_decoder),
                              stream=output_stream,
                              Dumper=get_dumper(
                                  use_annotations=use_annotations,
                                  indentless=indentless_lists),
                              width=width,
                              allow_unicode=True,
                              default_flow_style=False,
                              explicit_start=explicit_start,
                              explicit_end=explicit_end)
            elif output_format == "xml":
                import xmltodict
                for doc in decode_docs(jq_out, json_decoder):
                    if xml_root:
                        doc = {xml_root: doc}
                    elif not isinstance(doc, OrderedDict):
                        msg = (
                            "{}: Error converting JSON to XML: cannot represent non-object types at top level. "
                            "Use --xml-root=name to envelope your output with a root element."
                        )
                        exit_func(msg.format(program_name))
                    full_document = bool(xml_dtd)
                    try:
                        xmltodict.unparse(doc,
                                          output=output_stream,
                                          full_document=full_document,
                                          pretty=True,
                                          indent="  ")
                    except ValueError as e:
                        if "Document must have exactly one root" in str(e):
                            raise Exception(
                                str(e) +
                                " Use --xml-root=name to envelope your output with a root element"
                            )
                        else:
                            raise
                    output_stream.write(b"\n" if sys.version_info < (
                        3, 0) else "\n")
            elif output_format == "toml":
                import toml
                for doc in decode_docs(jq_out, json_decoder):
                    if not isinstance(doc, OrderedDict):
                        msg = "{}: Error converting JSON to TOML: cannot represent non-object types at top level."
                        exit_func(msg.format(program_name))

                    if USING_PYTHON2:
                        # For Python 2, dump the string and encode it into bytes.
                        output = toml.dumps(doc)
                        output_stream.write(output.encode("utf-8"))
                    else:
                        # For Python 3, write the unicode to the buffer directly.
                        toml.dump(doc, output_stream)
        else:
            if input_format == "yaml":
                loader = get_loader(use_annotations=False)
                for input_stream in input_streams:
                    for doc in yaml.load_all(input_stream, Loader=loader):
                        json.dump(doc, jq.stdin, cls=JSONDateTimeEncoder)
                        jq.stdin.write("\n")
            elif input_format == "xml":
                import xmltodict
                for input_stream in input_streams:
                    json.dump(
                        xmltodict.parse(input_stream.read(),
                                        disable_entities=True,
                                        force_list=xml_force_list), jq.stdin)
                    jq.stdin.write("\n")
            elif input_format == "toml":
                import toml
                for input_stream in input_streams:
                    json.dump(toml.load(input_stream), jq.stdin)
                    jq.stdin.write("\n")
            else:
                raise Exception("Unknown input format")

            try:
                jq.stdin.close()
            except Exception:
                pass
            jq.wait()
        for input_stream in input_streams:
            input_stream.close()
        exit_func(jq.returncode)
    except Exception as e:
        exit_func("{}: Error running jq: {}: {}.".format(
            program_name,
            type(e).__name__, e))
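A minimal driver for the function above might look like the following sketch; it assumes jq is installed and on PATH, and swaps exit_func for a no-op so the call returns instead of exiting the interpreter. The input text and jq program are illustrative only.

import io
import sys

# "." is the identity jq program; with output_format="json", jq inherits
# this process's stdout and the converted documents are printed there.
yaml_text = io.StringIO("a: 1\n---\nb: [2, 3]\n")
yq(input_streams=[yaml_text],
   output_stream=sys.stdout,
   output_format="json",
   jq_args=["."],
   exit_func=lambda code=None: None)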
Example #45
def build_from_dir(dirname, output_file):
    """
    Read a GlobalMind database exported in YAML files, translate
    it into ConceptNet 5 edges, and write those edges to disk using
    a JSONStreamWriter.
    """
    out = JSONStreamWriter(output_file)
    userdata = yaml.load_all(open(dirname + '/GMUser.yaml'))
    users = {}

    for userinfo in userdata:
        users[userinfo['pk']] = userinfo

    frame_data = yaml.load_all(open(dirname + '/GMFrame.yaml'))
    frames = {}
    for frame in frame_data:
        frames[frame['pk']] = frame['fields']

    assertiondata = yaml.load_all(open(dirname + '/GMAssertion.yaml'))
    assertions = {}
    for assertion in assertiondata:
        obj = assertion['fields']
        frame = frames[obj['frame']]
        frametext = frame['text']
        userinfo = users[obj['author']]
        username = userinfo['fields']['username']

        # GlobalMind provides information about what country the user is from, which
        # we can preserve in the contributor URI.
        #
        # If I got to re-choose these URIs, I would distinguish usernames with
        # a country code from those without a country code by something more
        # than the number of slashes, and I would write the country code in
        # capital letters.
        userlocale = userinfo['fields']['ccode'].lower()
        if userlocale:
            user_source = "/s/contributor/globalmind/%s/%s" % (userlocale,
                                                               username)
        else:
            user_source = "/s/contributor/globalmind/%s" % username

        sources = [user_source, "/s/activity/globalmind/assert"]

        lang = LANG_CODES[obj['lcode']]
        start = normalized_concept_uri(lang, obj['node1'])
        end = normalized_concept_uri(lang, obj['node2'])
        rel = '/r/' + RELATION_MAP.get(frame['relation'], frame['relation'])

        # fix messy english "around in"
        if ' around ' in frametext:
            if obj['node2'].startswith('in '):
                frametext = frametext.replace(' around ', ' in ')
                obj['node2'] = obj['node2'][3:]
            else:
                frametext = frametext.replace(' around ', ' near ')
                rel = '/r/LocatedNear'

        # fix more awkward English. I wonder how bad the other languages are.
        frametext = frametext.replace('hits your head', 'comes to mind')
        frametext = frametext.replace(': [node1], [node2]',
                                      ' [node1] and [node2]')

        node1 = u'[[' + obj['node1'] + u']]'
        node2 = u'[[' + obj['node2'] + u']]'
        surfaceText = frametext.replace('//', '').replace('[node1]',
                                                          node1).replace(
                                                              '[node2]', node2)
        edge = make_edge(rel,
                         start,
                         end,
                         dataset='/d/globalmind',
                         license='/l/CC/By',
                         sources=sources,
                         surfaceText=surfaceText,
                         weight=1)
        out.write(edge)
        assertions[assertion['pk']] = edge

    translationdata = yaml.load_all(open(dirname + '/GMTranslation.yaml'))
    for translation in translationdata:
        obj = translation['fields']
        assertion1 = assertions[obj['assertion1']]
        assertion2 = assertions[obj['assertion2']]
        start = assertion1['uri']
        end = assertion2['uri']
        rel = '/r/TranslationOf'
        text1 = assertion1['surfaceText'].replace('[[', '').replace(']]', '')
        text2 = assertion2['surfaceText'].replace('[[', '').replace(']]', '')
        lang1 = LANG_NAMES[get_lang(assertion1)]
        lang2 = LANG_NAMES[get_lang(assertion2)]
        surfaceText = u"[[%s]] in %s means [[%s]] in %s." % (text1, lang1,
                                                             text2, lang2)
        userinfo = users[obj['author']]
        username = userinfo['fields']['username']

        userlocale = userinfo['fields']['ccode'].lower()
        if userlocale:
            user_source = "/s/contributor/globalmind/%s/%s" % (userlocale,
                                                               username)
        else:
            user_source = "/s/contributor/globalmind/%s" % username

        sources = [user_source, "/s/activity/globalmind/translate"]
        edge = make_edge(rel,
                         start,
                         end,
                         dataset='/d/globalmind',
                         license=Licenses.cc_attribution,
                         sources=sources,
                         surfaceText=surfaceText,
                         weight=1)
        out.write(edge)
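For orientation, here is a hypothetical GMAssertion record limited to the keys the loop above actually reads (pk, and frame/author/lcode/node1/node2 under fields); the real export contains more. The values are invented.

import yaml

sample = """
pk: 101
fields:
  frame: 7
  author: 42
  lcode: en
  node1: a dog
  node2: an animal
"""
# load_all yields one document per "---"-separated section.
record = next(yaml.load_all(sample, Loader=yaml.SafeLoader))
assert record['fields']['node1'] == 'a dog'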
Example #46
    def _load_from_file_path(self, path):
        with open(path) as source_file:
            for document in yaml.load_all(source_file):
                self._add_indicator_from_dict(document)
Example #47
    test.columns[0]: test[test.columns[0]],
    test.columns[1]: test[test.columns[1]],
    ss.columns[2]: preds
})

preds.to_csv('nnSolSubmit3.csv', index=False)

import json

with open("kerasModelJson.txt") as json_file:
    json_data = json.load(json_file)

import yaml

stream = open("kerasModel.yml", "r")
docs = yaml.load_all(stream)

import pandas as pd
import numpy as np
import xgboost as xgb
import copy
import array
from sknn.mlp import Classifier, Layer
import cPickle as pickle
import scipy.sparse
from scipy.sparse import hstack
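One caveat with the kerasModel.yml read above: yaml.load_all returns a lazy generator, so the documents must be consumed while the stream is still open. A safer sketch of the same read:

import yaml

with open("kerasModel.yml", "r") as stream:
    # Materialize before the file closes. Keras-era model YAML can carry
    # python/* tags that SafeLoader rejects, so choose the loader to match
    # the file's contents.
    docs = list(yaml.load_all(stream, Loader=yaml.FullLoader))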
Example #48
File: fizzle.py Project: drtconway/zotmer
def main(argv):
    global verbose

    opts = docopt.docopt(__doc__, argv)

    verbose = opts['-v']

    genomeDir = '.'
    if opts['-g']:
        genomeDir = opts['-g']
    sf = SequenceFactory(genomeDir)

    if opts['-P']:
        if opts['-t']:
            prepareBedFileGeneTx(opts['<gene-list>'], opts['<refgene>'],
                                 opts['<bedfile>'])
        else:
            prepareBedFileGene(opts['<gene-list>'], opts['<refgene>'],
                               opts['<bedfile>'])
        return

    if opts['-X']:
        with openFile(opts['<index>'], 'w') as out:
            yaml.safe_dump_all(indexBedFiles(opts['<must-have>'], sf),
                               out,
                               default_flow_style=False)
        return

    K = int(opts['-k'])
    minGeneReads = int(opts['-M'])
    minExonReads = int(opts['-m'])
    minGeneRate = float(opts['-R'])
    minExonRate = float(opts['-r'])
    (minGeneCount, maxGeneCount) = map(int, opts['-Z'].split(':'))
    (minExonCount, maxExonCount) = map(int, opts['-z'].split(':'))

    with openFile(opts['<index>']) as f:
        ref = list(yaml.load_all(f, Loader=yaml.BaseLoader))

    if True:
        # Test the double-layer index
        idx = ExonIndex(K, ref)

        acc = {}
        toc = {}
        rn = 0
        for itm in reads(opts['<input>'],
                         K=K,
                         paired=True,
                         reads=True,
                         kmers=False,
                         both=True,
                         verbose=verbose):
            rn += 1
            (lhsFwd, lhsRev) = kmersLists(K, itm.reads[0][1])
            (rhsFwd, rhsRev) = kmersLists(K, itm.reads[1][1])
            xs0 = lhsFwd + rhsRev
            rh0 = idx.readHash(xs0)
            if rh0 is not None:
                (h0, ys0) = rh0
                if h0 not in acc:
                    acc[h0] = []
                    toc[h0] = ys0
                acc[h0].append((compressRead(itm.reads[0][1]),
                                compressRead(itm.reads[1][1])))

            xs1 = lhsRev + rhsFwd
            rh1 = idx.readHash(xs1)
            if rh1 is not None:
                (h1, ys1) = rh1
                if h1 not in acc:
                    acc[h1] = []
                    toc[h1] = ys1
                acc[h1].append((compressRead(itm.reads[0][1]),
                                compressRead(itm.reads[1][1])))

        nx = 0
        for h in sorted(acc.keys()):
            for (x, c) in sorted(acc[h].items()):
                nx += 1
                if c <= 1:
                    continue
                print '%016x\t%s\t%d' % (h, render(K, x), c)

        print >> sys.stderr, 'nx =', nx
        return

    if False:
        # Position index
        idx = {}
        for i in range(len(ref)):
            itm = ref[i]
            for (x, p) in kmersWithPosList(K, itm['seq'], False):
                p -= 1
                if x not in idx:
                    idx[x] = []
                idx[x].append((i, p))

    if True:
        # Exon tuple index
        idx = {}
        lens = [0 for i in range(len(ref))]
        for i in range(len(ref)):
            itm = ref[i]
            for (x, p) in kmersWithPosList(K, itm['seq'], False):
                if x not in idx:
                    idx[x] = set([])
                idx[x].add(i)
                lens[i] += 1
        for x in idx.iterkeys():
            idx[x] = tuple(sorted(idx[x]))

    if opts['-T']:
        ak = {}
        for x in sorted(idx.iterkeys()):
            if len(idx[x]) == 1:
                continue
            xStr = render(K, x)
            ak[xStr] = []
            for i in idx[x]:
                itm = ref[i]
                k = '%s/%s' % (itm['gene'], itm['exon'])
                ak[xStr].append(k)
            ak[xStr].sort()
        rep = {}
        rep['aliasing-within'] = ak
        chrs = set([])
        for i in range(len(ref)):
            itm = ref[i]
            chrs.add(itm['chr'])
        counts = [0 for i in range(len(ref))]
        for ch in sorted(chrs):
            if verbose:
                print >> sys.stderr, 'processing %s' % (ch, )
            seq = sf[ch]
            for (x, p) in kmersWithPos(K, seq, True):
                if x not in idx:
                    continue
                for i in idx[x]:
                    counts[i] += 1
        gk = {}
        for i in range(len(ref)):
            if lens[i] == counts[i]:
                continue
            itm = ref[i]
            k = '%s/%s' % (itm['gene'], itm['exon'])
            gk[k] = {'indexed': lens[i], 'genomic': counts[i]}
        rep['aliasing-genomic'] = gk
        yaml.safe_dump(rep, sys.stdout, default_flow_style=False)
        return

    acc = {}
    rn = 0
    hitStats = Summary()
    hitHist = [0 for i in range(1000)]
    for itm in reads(opts['<input>'],
                     K=K,
                     paired=True,
                     reads=True,
                     kmers=False,
                     both=True,
                     verbose=verbose):
        rn += 1
        (lhsFwd, lhsRev) = kmersWithPosLists(K, itm.reads[0][1])
        (rhsFwd, rhsRev) = kmersWithPosLists(K, itm.reads[1][1])
        (hits0, hitCount0) = recHits(idx, lhsFwd + rhsRev)
        (hits1, hitCount1) = recHits(idx, lhsRev + rhsFwd)
        if len(hits0) > 0:
            k = tuple(sorted(hits0.keys()))
            v = sum(hits0.values())
            if k not in acc:
                acc[k] = [0, 0]
            acc[k][0] += 1
            acc[k][1] += v
            hitStats.add(hitCount0)
            hitHist[hitCount0] += 1

        if len(hits1) > 0:
            k = tuple(sorted(hits1.keys()))
            v = sum(hits1.values())
            if k not in acc:
                acc[k] = [0, 0]
            acc[k][0] += 1
            acc[k][1] += v
            hitStats.add(hitCount1)
            hitHist[hitCount1] += 1

    if verbose:
        print >> sys.stderr, 'total read hits: %d' % (len(hitStats), )
        print >> sys.stderr, 'total hits per read: %g (%g)' % (hitStats.mean(),
                                                               hitStats.sd())
        print >> sys.stderr, 'total reads: %d' % (rn, )
        for i in range(len(hitHist)):
            if hitHist[i] > 0:
                print >> sys.stderr, '\t%d\t%d' % (i, hitHist[i])

    def gex(s):
        r = []
        for n in s:
            itm = ref[n]
            r.append('%s/%s' % (itm['gene'], itm['exon']))
        return '|'.join(r)

    def fmtKey(k):
        nex = len(k)
        gx = set([])
        kStrParts = []
        for s in k:
            kStrParts.append(gex(s))
            gx |= set([ref[i]['gene'] for i in s])
        kStr = '--'.join(sorted(kStrParts))
        return (nex, gx, kStr)

    gxCounts = {}
    for k in acc.keys():
        gx = set([])
        ex = set([])
        for s in k:
            gx |= set([ref[i]['gene'] for i in s])
            ex |= set(s)
        gx = tuple(sorted(gx))
        if gx not in gxCounts:
            gxCounts[gx] = [0, 0]
        gxCounts[gx][0] += acc[k][0]
        gxCounts[gx][1] += acc[k][1]

    hdr = ['numReads', 'numKmers', 'kmersPerRead']
    hdr += ['ggNumReads', 'ggNumKmers', 'ggKmersPerRead']
    hdr += ['numExons', 'numGenes', 'geneGroup', 'exonGroup']
    print '\t'.join(hdr)
    for k in acc.keys():
        (nex, gx, kStr) = fmtKey(k)
        gx = tuple(sorted(gx))
        # Recompute the exon set for this key; without this, `ex` below
        # would be stale from the gxCounts loop above.
        ex = set([])
        for s in k:
            ex |= set(s)
        if len(gx) < minGeneCount or len(gx) > maxGeneCount:
            continue
        if len(ex) < minExonCount or len(ex) > maxExonCount:
            continue
        if gxCounts[gx][0] < minGeneReads:
            continue
        if acc[k][0] < minExonReads:
            continue
        gxRate = float(gxCounts[gx][1]) / float(gxCounts[gx][0])
        if gxRate < minGeneRate:
            continue
        exRate = float(acc[k][1]) / float(acc[k][0])
        if exRate < minExonRate:
            continue
        gxStr = ':'.join(gx)

        print '%d\t%d\t%g\t%d\t%d\t%g\t%d\t%d\t%s\t%s' % (
            acc[k][0], acc[k][1], exRate, gxCounts[gx][0], gxCounts[gx][1],
            gxRate, nex, len(gx), gxStr, kStr)
Example #49
csv['spec']['install']['spec']['clusterPermissions'] = []

# Add operator role to the CSV:
with open('deploy/role.yaml', 'r') as stream:
    # PyYAML >= 5.1 expects an explicit Loader; role.yaml is plain YAML,
    # so SafeLoader suffices.
    operator_role = yaml.load(stream, Loader=yaml.SafeLoader)
    csv['spec']['install']['spec']['clusterPermissions'].append(
        {
            'rules': operator_role['rules'],
            'serviceAccountName': operator_name,
        })

# Add our deployment spec for the operator:
with open('deploy/operator.yaml', 'r') as stream:
    # There is only one yaml document in the operator deployment.
    operator_components = list(yaml.load_all(stream, Loader=yaml.SafeLoader))
    operator_deployment = operator_components[0]
    csv['spec']['install']['spec']['deployments'][0]['spec'] = operator_deployment['spec']

# Update the deployment to use the defined image:
csv['spec']['install']['spec']['deployments'][0]['spec']['template']['spec']['containers'][0]['image'] = operator_image

# Update the versions to include git hash:
csv['metadata']['name'] = "{}.v{}".format(operator_name, full_version)
csv['spec']['version'] = full_version
csv['spec']['replaces'] = "{}.v{}".format(operator_name, prev_version)

# Set the CSV createdAt annotation:
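The snippet breaks off at this comment. A plausible continuation, assuming the operator-sdk convention of a createdAt key under metadata.annotations (the key name and timestamp format are assumptions, not taken from the original):

import datetime

# Hypothetical: stamp the CSV with the current UTC time.
csv['metadata']['annotations']['createdAt'] = \
    datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')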
Example #50
def include_slides(filename):
    """ loads efficient yaml slide definitions as slides to be put in your main presentation definition"""
    p = presenter

    # open() replaces the Python-2-only file() builtin.
    slides = yaml.load_all(open(filename))

    background = 'bg'

    for i, slide in enumerate(slides):
        if slide is None:
            continue
        if 'bookmark' in slide:
            p.bookmark(slide['bookmark'])
        elif 'title' in slide:
            p.bookmark(slide['title'])
        elif 'bmark' in slide:
            p.bookmark(slide['bmark'])
        else:
            p.bookmark(filename + ' - ' + str(i))

        if 'background' in slide:
            background = slide['background']

        if 'bg' in slide:
            background = slide['bg']

        if 'pdf' in slide and 'slides' in slide:
            images = pdf2ppm_cache(slide['pdf'], slide['slides'])
            p.play(
                load_image_slides(images,
                                  library='pdf',
                                  background=background,
                                  content=slide.get('content', None)))
            p.pause()
        elif 'svg' in slide:
            images = svg2png_cache(slide['svg'])
            p.play(
                load_image_slides(images,
                                  library='svg',
                                  background=background,
                                  content=slide.get('content', None)))
            p.pause()
        elif 'image_files' in slide:
            images = imagefiles_to_images(slide['image_files'])
            p.play(
                load_image_slides(images,
                                  library='image_files',
                                  background=background,
                                  content=slide.get('content', None)))
            p.pause()

        elif 'slideshow' in slide:
            images = imagefiles_to_images(slide['slideshow'])
            slideshow_anim = images_slideshow(images,
                                              library='image_files',
                                              background=background,
                                              delay=slide.get('delay', 2.0),
                                              repeat=slide.get('repeat', 1),
                                              fade_time=slide.get(
                                                  'fade_time', 0.5))

            p.play(slideshow_anim)
            p.pause()

        elif 'images' in slide:
            if 'library' in slide:
                lib = slide['library']
            else:
                lib = 'default'
            p.play(
                load_image_slides(slide['images'],
                                  library=lib,
                                  background=background,
                                  content=slide.get('content', None)))
            p.pause()

        elif 'rst' in slide:
            images = rst2ppm_cache(i, slide.get('title', ''), slide['rst'],
                                   slide.get('rst_style'))
            p.play(
                load_image_slides(images,
                                  library='pdf',
                                  background=background,
                                  content=slide.get('content', None)))
            p.pause()

        else:
            p.play(load_slide_content(slide['content']))
            p.pause()
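A hypothetical slide file for the loader above, one YAML document per slide, using only keys the loop actually checks (bookmark/title, bg, pdf with slides, images); file names are invented:

---
bookmark: intro
bg: black.png
pdf: intro.pdf
slides: [1, 2, 3]
---
title: results
images: [plot1.png, plot2.png]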
Example #51
def run_test(command):
    wrapped_command = "cd %s && %s" % (root_dir, command)
    pipe = subprocess.Popen(
        wrapped_command, shell=True,
    )
    pipe.wait()
    if pipe.returncode == 0:
        print(green("TEST PASSED"))
    else:
        print(red("TEST FAILED"))
    return pipe.returncode

# load the script tests from the .travis.yml file
with open(os.path.join(root_dir, '.travis.yml')) as stream:
    travis_yml = yaml.load_all(stream.read())
config = travis_yml.next()
tests = config['script']

# run the tests
if isinstance(tests, (str, unicode)):
    returncode = run_test(tests)
elif isinstance(tests, (list, tuple)):
    returncode = 0
    for test in tests:
        returncode += run_test(test)

if returncode == 0:
    print(green("ALL TESTS PASSED"))
else:
    print(red("SOME TESTS FAILED, SEE ABOVE"))
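The runner above accepts script as either a single command or a list, hence the two isinstance branches. A hypothetical .travis.yml fragment it would consume:

script:
  - python -m pytest tests/
  - python -m flake8 .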
Example #52
def load_yaml(fn):
    with session_scope() as s, open(fn) as f:
        for data in yaml.load_all(f, Loader=yaml.FullLoader):
            bookyml = Book(**data)
            s.add(bookyml)
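Each YAML document becomes one Book row via Book(**data), so the document keys must match the model's column names. A hypothetical input file (the field names are assumed, not taken from the original):

---
title: The Dispossessed
author: Ursula K. Le Guin
---
title: Solaris
author: Stanislaw Lem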
Example #53
def gather_yaml(path):
    ships = []
    extra_info = get_parts(path)
    for e in os.listdir(path):
        fullpath = os.path.join(path, e)
        if os.path.isfile(fullpath):
            if e.endswith('.yaml'):
                for ship in yaml.load_all(open(fullpath, 'r')):
                    try:
                        set_default(ship, 'info', extra_info)
                        ship['path'] = path

                        # Get image width/height
                        im = Image.open(os.path.join(path, ship['filename']))
                        ship['image_size'] = im.size

                        # Pull out size keys into standard location
                        size_axis = 0
                        for (dim, axis) in dimension_axes.items():
                            if dim in ship['info']:
                                ship['info']['Size'] = ship['info'][dim]
                                ship['info']['Dimension'] = dim
                                size_axis = axis
                                break

                        # Initialize scale information
                        m = px = unit = m_per_px = None

                        # First, check for explicit x_per_px
                        for pre in unit_conversions.keys():
                            key = pre + '_per_px'
                            if key in ship:
                                m_per_px = ship[key]
                                unit = pre
                                if isinstance(m_per_px, str):
                                    (m, px) = m_per_px.split('/')
                                break

                        # Otherwise, load from the size keys
                        if not m_per_px and 'Size' in ship['info']:
                            m = ship['info']['Size']
                            # Load px from correct image axis
                            if 'size_px' in ship:
                                px = ship['size_px']
                            else:
                                px = im.size[size_axis]

                        # Look for a unit as well
                        if ('Unit' in ship['info']) and ship['info']['Unit']:
                            unit = ship['info']['Unit']

                        # At this point we should have something
                        if m and px:
                            m_per_px = float(m) / float(px)

                        if unit and unit != 'm':
                            print("Converting {} to m".format(unit))
                            m_per_px = m_per_px * unit_conversions[unit]

                        # Assign to ship
                        if m_per_px:
                            ship['m_per_px'] = m_per_px
                            ship['real_size'] = [
                                d * ship['m_per_px']
                                for d in ship['image_size']
                            ]
                            if 'Size' not in ship['info']:
                                ship['info']['Size'] = round_to_n(
                                    ship['real_size'][0], 3)
                                ship['info']['Dimension'] = 'Size'
                                ship['info']['Size Notes'] = 'approximately'
                        else:
                            # If we don't have things by now throw a fit
                            raise ArithmeticError("Couldn't determine m/px!")

                        ships.append(ship)
                    except ValueError as e:
                        print("Failed to load ship from {}:\n{}\n{}".format(
                            fullpath, str(e), str(ship)))
                    except IOError as e:
                        print("Failed to load image from {}:\n{}\n{}".format(
                            fullpath, str(e), str(ship)))
                    except ArithmeticError as e:
                        print("Failed to load ship from {}:\n{}\n{}".format(
                            fullpath, str(e), str(ship)))

        elif os.path.isdir(fullpath):
            ships += gather_yaml(fullpath)

    return ships
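A hypothetical ship definition for the loader above, showing the two scale paths it checks: an explicit m_per_px (numeric or a "m/px" string, assuming 'm' is among unit_conversions' keys), or a recognized dimension key plus Unit under info (here Length stands in for one of the dimension_axes keys, which are not shown in the snippet):

---
filename: cruiser.png
m_per_px: "150/300"
info:
  Class: cruiser
---
filename: shuttle.png
info:
  Length: 12
  Unit: m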
Example #54
def evaluate_models():

    stream = open("evaluate_models.yml", 'r')
    docs = yaml.load_all(stream)
    for doc in docs:
        true_pos_reward = doc['true_pos_reward']
        true_neg_reward = doc['true_neg_reward']
        false_pos_penalty = doc['false_pos_penalty']
        false_neg_penalty = doc['false_neg_penalty']

        pic_status = doc['pics']
        k = doc['k']

    if os.path.isfile('./model_scores.csv'):
        model_scores_df = pd.read_csv('./model_scores.csv')
        for col in model_scores_df.columns:
            if 'Unnamed' in col:
                model_scores_df = model_scores_df.drop(col, 1)
    else:
        model_scores_df = pd.DataFrame(columns=[
            'model', 'score', 'recall_at_' + str(k), 'precision_at_' + str(k)
        ])

    filenames = []
    scores = []
    p_at_k = []
    r_at_k = []

    cost_mtx = {
        '[1, 1]': true_pos_reward,
        '[0, 0]': true_neg_reward,
        '[1, 0]': false_pos_penalty,
        '[0, 1]': false_neg_penalty
    }

    for fn in os.listdir('./model_output/'):
        if (not os.path.isfile('./model_output/tracker.s')
                or os.path.getmtime('./model_output/' + fn) >
                os.path.getmtime('./model_output/tracker.s')):
            print(fn)
            filenames.append(fn)
            result_df_tup = pickle.load(open('./model_output/' + fn, "rb"))
            result_df = result_df_tup[0]
            top_3_feats = result_df_tup[1]

            temp_df = pd.DataFrame(columns=[
                'model', 'timestamp', 'score', 'recall_at_' +
                str(k), 'precision_at_' + str(k), 'top_3_feats'
            ])
            temp_df.model = [fn]

            pr = pr_at_k(k, result_df)
            s = score_at_k(k, result_df, cost_mtx)

            temp_df.score = [s]
            temp_df['precision_at_' + str(k)] = [pr[0]]
            temp_df['recall_at_' + str(k)] = [pr[1]]
            temp_df['timestamp'] = [datetime.datetime.now()]
            temp_df['top_3_feats'] = [top_3_feats]

            model_scores_df = model_scores_df.append(temp_df,
                                                     ignore_index=True)
            model_scores_df.to_csv('model_scores.csv', index=False)

            if (pic_status):
                plot_pr_curve(result_df, fn[:-2])

    if not os.path.isfile('./model_output/tracker.s'):
        open('./model_output/tracker.s', 'w+')
    else:
        os.utime('./model_output/tracker.s', None)
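The evaluate_models.yml file read above is expected to hold a single document with these six keys; a hypothetical example with invented values:

true_pos_reward: 10
true_neg_reward: 1
false_pos_penalty: -2
false_neg_penalty: -20
pics: true
k: 100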
Example #55
    def load_names_aliases(self):
        self.name_aliases = [doc for doc in load_all(self.geo_coding_file)]
Example #56
File: rook.py Project: shao-xy/ceph
def rook_operator(ctx, config):
    cluster_name = config['cluster']
    rook_branch = config.get('rook_branch', 'master')
    rook_git_url = config.get('rook_git_url', 'https://github.com/rook/rook')

    log.info(f'Cloning {rook_git_url} branch {rook_branch}')
    ctx.rook[cluster_name].remote.run(args=[
        'rm',
        '-rf',
        'rook',
        run.Raw('&&'),
        'git',
        'clone',
        '--single-branch',
        '--branch',
        rook_branch,
        rook_git_url,
        'rook',
    ])

    # operator.yaml
    operator_yaml = ctx.rook[cluster_name].remote.read_file(
        'rook/cluster/examples/kubernetes/ceph/operator.yaml')
    rook_image = config.get('rook_image')
    if rook_image:
        log.info(f'Patching operator to use image {rook_image}')
        crs = list(yaml.load_all(operator_yaml, Loader=yaml.FullLoader))
        assert len(crs) == 2
        crs[1]['spec']['template']['spec']['containers'][0][
            'image'] = rook_image
        operator_yaml = yaml.dump_all(crs)
    ctx.rook[cluster_name].remote.write_file('operator.yaml', operator_yaml)

    op_job = None
    try:
        log.info('Deploying operator')
        _kubectl(ctx, config, [
            'create',
            '-f',
            'rook/cluster/examples/kubernetes/ceph/crds.yaml',
            '-f',
            'rook/cluster/examples/kubernetes/ceph/common.yaml',
            '-f',
            'operator.yaml',
        ])

        # on centos:
        if teuthology.get_distro(ctx) == 'centos':
            _kubectl(ctx, config, [
                '-n', 'rook-ceph', 'set', 'env', 'deploy/rook-ceph-operator',
                'ROOK_HOSTPATH_REQUIRES_PRIVILEGED=true'
            ])

        # wait for operator
        op_name = None
        with safe_while(sleep=10, tries=90,
                        action="wait for operator") as proceed:
            while not op_name and proceed():
                p = _kubectl(
                    ctx,
                    config,
                    [
                        '-n', 'rook-ceph', 'get', 'pods', '-l',
                        'app=rook-ceph-operator'
                    ],
                    stdout=BytesIO(),
                )
                for line in p.stdout.getvalue().decode(
                        'utf-8').strip().splitlines():
                    name, ready, status, _ = line.split(None, 3)
                    if status == 'Running':
                        op_name = name
                        break

        # log operator output
        op_job = _kubectl(
            ctx,
            config,
            ['-n', 'rook-ceph', 'logs', '-f', op_name],
            wait=False,
            logger=log.getChild('operator'),
        )

        yield

    except Exception as e:
        log.exception(e)
        raise

    finally:
        log.info('Cleaning up rook operator')
        _kubectl(ctx, config, [
            'delete',
            '-f',
            'operator.yaml',
        ])
        if False:
            # don't bother since we'll tear down k8s anyway (and this mysteriously
            # fails sometimes when deleting some of the CRDs... not sure why!)
            _kubectl(ctx, config, [
                'delete',
                '-f',
                'rook/cluster/examples/kubernetes/ceph/common.yaml',
            ])
            _kubectl(ctx, config, [
                'delete',
                '-f',
                'rook/cluster/examples/kubernetes/ceph/crds.yaml',
            ])
        ctx.rook[cluster_name].remote.run(
            args=['rm', '-rf', 'rook', 'operator.yaml'])
        if op_job:
            op_job.wait()
        run.wait(ctx.cluster.run(args=['sudo', 'rm', '-rf', '/var/lib/rook']))
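The image patch above is a load_all / mutate / dump_all round-trip over a multi-document manifest. A self-contained sketch of the same pattern (the manifest text is illustrative, but its layout mirrors what the snippet asserts: document 1 holds the deployment):

import yaml

manifest = """
kind: ConfigMap
---
kind: Deployment
spec:
  template:
    spec:
      containers:
      - image: rook/ceph:master
"""
docs = list(yaml.load_all(manifest, Loader=yaml.FullLoader))
# Point the deployment's first container at a different image, then re-dump.
docs[1]['spec']['template']['spec']['containers'][0]['image'] = 'rook/ceph:v1.7'
patched = yaml.dump_all(docs)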
Example #57
    'imageadd', 'imagedel', 'spellcheck', 'experiment', 'balance', 'code_imp',
    'refactor', 'config', 'admin', 'server'
]


def dictToTuples(inp):
    return [(k, v) for k, v in inp.items()]


changelog_cache = os.path.join(args.ymlDir, '.all_changelog.yml')

failed_cache_read = True
if os.path.isfile(changelog_cache):
    try:
        with open(changelog_cache, encoding='utf-8') as f:
            (_, all_changelog_entries) = yaml.load_all(f,
                                                       Loader=yaml.SafeLoader)
            failed_cache_read = False

            # Convert old timestamps to newer format.
            new_entries = {}
            for _date in all_changelog_entries.keys():
                ty = type(_date).__name__
                if ty in ['str', 'unicode']:
                    temp_data = all_changelog_entries[_date]
                    _date = datetime.strptime(_date, dateformat).date()
                    new_entries[_date] = temp_data
                else:
                    new_entries[_date] = all_changelog_entries[_date]
            all_changelog_entries = new_entries
    except Exception as e:
Example #58
def layers():
    path = os.path.join(path_yaml_examples, 'layers_metadata.yml')
    with open(path, 'r') as stream:
        yield yaml.load_all(stream)
Example #59
def index_coro(dirpath_lwc_root, indices=None):  # pylint: disable=R0912
    """
    Yield mappings corresponding to the sent sequence of multi-key tuples.

    """
    if indices is not None:
        (line_index, references_index, objects_index) = indices
    else:
        line_index = None
        references_index = None
        objects_index = None

    line_idx_builder = da.util.index_builder_coro(line_index)
    references_idx_builder = da.util.index_builder_coro(references_index)
    objects_idx_builder = da.util.index_builder_coro(objects_index)
    matcher = _id_matcher_coro(dirpath_lwc_root)

    while True:

        indices = (line_index, references_index, objects_index)
        build_unit = (yield indices)
        file = build_unit['file']
        relpath = build_unit['relpath']

        # The first pass over the file is for
        # relatively unsophisticated indexing
        # - not accounting for any formatting
        # of the file other than the presence
        # of newline delimiters.
        #
        file.seek(0)
        for iline, binary_line in enumerate(file):
            text_line = binary_line.decode('utf-8')
            for (match_class, idstr, line_offset,
                 col_offset) in matcher.send(text_line):
                line_num = 1 + iline + line_offset
                col_num = 1 + col_offset
                pos = (line_num, col_num)
                line_index = line_idx_builder.send(
                    (match_class, idstr, relpath, pos))

        # The second pass over the file takes
        # account of the file format - We try
        # to parse the file to identify the
        # context within which each identifier
        # is placed.
        #
        file.seek(0, os.SEEK_SET)

        # YAML files are grist to the mill --
        # any part of a YAML data structure
        # is potentially of interest to us.
        #
        if relpath.endswith('yaml'):
            for data in yaml.load_all(file, Loader=da.util.marked_yaml.Loader):

                (maybe_ref_idx,
                 maybe_obj_idx) = _index_yaml(references_idx_builder,
                                              objects_idx_builder, matcher,
                                              relpath, data)
                if maybe_ref_idx is not None:
                    references_index = maybe_ref_idx
                if maybe_obj_idx is not None:
                    objects_index = maybe_obj_idx

        # We are only really interested in YAML or
        # JSON structures that are embedded in
        # comments or docstrings within the Python
        # source document.
        #
        # ---
        # i00022_store_requirements_in_python_source_documents:
        #   - "The system SHALL extract and process requirement clauses stored
        #     as comments in Python source documents."
        #   - notes: "We want to encourage the use of requirements to document
        #            both top-down and bottom-up aspects of the design process.
        #            Allowing requirement clauses to be stored as comments in
        #            source files makes it much easier for developers to create
        #            bottom-up requirements. Additionally, by placing the
        #            requirements next to the site of the implementation makes
        #            it less likely that requirements will be forgotten or
        #            ignored."
        #   - type: mandate
        #   - state: draft
        # ...
        if relpath.endswith('py'):
            pass
Example #60
def main():
    argparser = set_argparser()
    cmdargs = argparser.parse_args()
    scm = SigmaConfigurationManager()

    logger = logging.getLogger(__name__)
    if cmdargs.debug:  # pragma: no cover
        logger.setLevel(logging.DEBUG)

    if cmdargs.lists:
        print("Backends (Targets):")
        list_backends(cmdargs.debug)

        print()
        print("Configurations (Sources):")
        list_configurations(backend=cmdargs.target, scm=scm)

        print()
        print("Modifiers:")
        list_modifiers(modifiers=modifiers)
        sys.exit(0)
    elif len(cmdargs.inputs) == 0:
        print("Nothing to do!")
        argparser.print_usage()
        sys.exit(0)

    if cmdargs.target is None:
        print("No target selected, select one with -t/--target")
        argparser.print_usage()
        sys.exit(ERR_NO_TARGET)

    rulefilter = None
    if cmdargs.filter:
        try:
            rulefilter = SigmaRuleFilter(cmdargs.filter)
        except SigmaRuleFilterParseException as e:
            print("Parse error in Sigma rule filter expression: %s" % str(e),
                  file=sys.stderr)
            sys.exit(ERR_RULE_FILTER_PARSING)

    sigmaconfigs = SigmaConfigurationChain()
    backend_class = backends.getBackend(cmdargs.target)
    if cmdargs.config is None:
        if backend_class.config_required and not cmdargs.shoot_yourself_in_the_foot:
            print(
                "The backend you want to use usually requires a configuration to generate valid results. Please provide one with --config/-c.",
                file=sys.stderr)
            print(
                "Available choices for this backend (get complete list with --lists/-l):"
            )
            list_configurations(backend=cmdargs.target, scm=scm)
            sys.exit(ERR_CONFIG_REQUIRED)
        if backend_class.default_config is not None:
            cmdargs.config = backend_class.default_config

    if cmdargs.config:
        order = 0
        for conf_name in cmdargs.config:
            try:
                sigmaconfig = scm.get(conf_name)
                if sigmaconfig.order is not None:
                    if sigmaconfig.order <= order and not cmdargs.shoot_yourself_in_the_foot:
                        print(
                            "The configurations were provided in the wrong order (order key check in config file)",
                            file=sys.stderr)
                        sys.exit(ERR_CONFIG_ORDER)
                    order = sigmaconfig.order

                try:
                    if cmdargs.target not in sigmaconfig.config["backends"]:
                        print(
                            "The configuration '{}' is not valid for backend '{}'. Valid choices are: {}"
                            .format(conf_name, cmdargs.target,
                                    ", ".join(sigmaconfig.config["backends"])),
                            file=sys.stderr)
                        sys.exit(ERR_CONFIG_ORDER)
                except KeyError:
                    pass

                sigmaconfigs.append(sigmaconfig)
            except OSError as e:
                print("Failed to open Sigma configuration file %s: %s" %
                      (conf_name, str(e)),
                      file=sys.stderr)
                exit(ERR_OPEN_CONFIG_FILE)
            except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
                print("Sigma configuration file %s is no valid YAML: %s" %
                      (conf_name, str(e)),
                      file=sys.stderr)
                exit(ERR_CONFIG_INVALID_YAML)
            except SigmaConfigParseError as e:
                print("Sigma configuration parse error in %s: %s" %
                      (conf_name, str(e)),
                      file=sys.stderr)
                exit(ERR_CONFIG_PARSING)

    if cmdargs.output_fields:
        if cmdargs.output_format:
            output_fields_rejected = [
                field for field in cmdargs.output_fields.split(",")
                if field not in allowed_fields
            ]  # Not allowed fields
            if output_fields_rejected:
                print(
                    "These fields are not allowed (check help for the allowed field list): %s"
                    % (", ".join(output_fields_rejected)),
                    file=sys.stderr)
                exit(ERR_OUTPUT_FORMAT)
            else:
                output_fields_filtered = [
                    field for field in cmdargs.output_fields.split(",")
                    if field in allowed_fields
                ]  # Keep only allowed fields
        else:
            print(
                "The '--output-fields' or '-of' arguments must be used with '--output-format' or '-oF' equal to 'json' or 'yaml'",
                file=sys.stderr)
            exit(ERR_OUTPUT_FORMAT)

    backend_options = BackendOptions(cmdargs.backend_option,
                                     cmdargs.backend_config)
    backend = backend_class(sigmaconfigs, backend_options)

    filename_ext = cmdargs.output_extention
    filename = cmdargs.output
    fileprefix = None
    if filename:
        if filename_ext:
            if not filename_ext.startswith('.'):
                filename_ext = '.' + filename_ext
        else:
            filename_ext = '.rule'

        if filename[-1:] in ['_', '/', '\\']:
            fileprefix = filename
        else:
            try:
                out = open(filename, "w", encoding='utf-8')
            except (IOError, OSError) as e:
                print("Failed to open output file '%s': %s" %
                      (filename, str(e)),
                      file=sys.stderr)
                exit(ERR_OUTPUT)
    else:
        out = sys.stdout

    error = 0
    output_array = []
    for sigmafile in get_inputs(cmdargs.inputs, cmdargs.recurse):
        logger.debug("* Processing Sigma input %s" % (sigmafile))
        try:
            if cmdargs.inputs == ['-']:
                f = sigmafile
            else:
                f = sigmafile.open(encoding='utf-8')
            parser = SigmaCollectionParser(f, sigmaconfigs, rulefilter,
                                           sigmafile)
            results = parser.generate(backend)

            nb_result = len(list(copy.deepcopy(results)))
            inc_filenane = None if nb_result < 2 else 0

            newline_separator = '\0' if cmdargs.print0 else '\n'

            # results is an iterator that is used twice, so convert it to a list
            results = list(results)
            for result in results:
                if fileprefix is not None and inc_filenane is not None:  # yml action
                    try:
                        filename = fileprefix + str(sigmafile.name)
                        filename = filename.replace(
                            '.yml', '_' + str(inc_filenane) + filename_ext)
                        inc_filenane += 1
                        out = open(filename, "w", encoding='utf-8')
                    except (IOError, OSError) as e:
                        print("Failed to open output file '%s': %s" %
                              (filename, str(e)),
                              file=sys.stderr)
                        exit(ERR_OUTPUT)
                elif fileprefix is not None and inc_filenane is None:  # a simple yml
                    try:
                        filename = fileprefix + str(sigmafile.name)
                        filename = filename.replace('.yml', filename_ext)
                        out = open(filename, "w", encoding='utf-8')
                    except (IOError, OSError) as e:
                        print("Failed to open output file '%s': %s" %
                              (filename, str(e)),
                              file=sys.stderr)
                        exit(ERR_OUTPUT)
                if not cmdargs.output_fields:
                    print(result, file=out, end=newline_separator)

            if cmdargs.output_fields:  # Handle output fields
                output = {}
                f.seek(0)
                docs = yaml.load_all(f, Loader=yaml.FullLoader)
                for doc in docs:
                    for k, v in doc.items():
                        if k in output_fields_filtered:
                            output[k] = v
                output['rule'] = [result for result in results]
                if "filename" in output_fields_filtered:
                    output['filename'] = str(sigmafile.name)
                output_array.append(output)

            if nb_result == 0:  # the backend produced only one output
                if fileprefix is not None:  # want a prefix anyway
                    try:
                        filename = "%s%s_mono_output%s" % (
                            fileprefix, cmdargs.target, filename_ext)
                        out = open(filename, "w", encoding='utf-8')
                        fileprefix = None  # no need to open the same file many times
                    except (IOError, OSError) as e:
                        print("Failed to open output file '%s': %s" %
                              (filename, str(e)),
                              file=sys.stderr)
                        exit(ERR_OUTPUT)

        except OSError as e:
            print("Failed to open Sigma file %s: %s" % (sigmafile, str(e)),
                  file=sys.stderr)
            error = ERR_OPEN_SIGMA_RULE
        except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
            print("Error: Sigma file %s is no valid YAML: %s" %
                  (sigmafile, str(e)),
                  file=sys.stderr)
            error = ERR_INVALID_YAML
            if not cmdargs.defer_abort:
                sys.exit(error)
        except (SigmaParseError, SigmaCollectionParseError) as e:
            print("Error: Sigma parse error in %s: %s" % (sigmafile, str(e)),
                  file=sys.stderr)
            error = ERR_SIGMA_PARSING
            if not cmdargs.defer_abort:
                sys.exit(error)
        except NotSupportedError as e:
            print(
                "Error: The Sigma rule requires a feature that is not supported by the target system: "
                + str(e),
                file=sys.stderr)
            if not cmdargs.ignore_backend_errors:
                error = ERR_NOT_SUPPORTED
                if not cmdargs.defer_abort:
                    sys.exit(error)
        except BackendError as e:
            print("Error: Backend error in %s: %s" % (sigmafile, str(e)),
                  file=sys.stderr)
            if not cmdargs.ignore_backend_errors:
                error = ERR_BACKEND
                if not cmdargs.defer_abort:
                    sys.exit(error)
        except (NotImplementedError, TypeError) as e:
            print(
                "An unsupported feature is required for this Sigma rule (%s): "
                % (sigmafile) + str(e),
                file=sys.stderr)
            if not cmdargs.ignore_backend_errors:
                error = ERR_NOT_IMPLEMENTED
                if not cmdargs.defer_abort:
                    sys.exit(error)
        except PartialMatchError as e:
            print("Error: Partial field match error: %s" % str(e),
                  file=sys.stderr)
            if not cmdargs.ignore_backend_errors:
                error = ERR_PARTIAL_FIELD_MATCH
                if not cmdargs.defer_abort:
                    sys.exit(error)
        except FullMatchError as e:
            print("Error: Full field match error: %s" % str(e),
                  file=sys.stderr)
            if not cmdargs.ignore_backend_errors:
                error = ERR_FULL_FIELD_MATCH
                if not cmdargs.defer_abort:
                    sys.exit(error)
        finally:
            try:
                f.close()
            except:
                pass

    result = backend.finalize()
    if result:
        print(result, file=out)

    if cmdargs.output_fields:
        if cmdargs.output_format == 'json':
            print(json.dumps(output_array, indent=4, ensure_ascii=False),
                  file=out)
        elif cmdargs.output_format == 'yaml':
            print(ruamel.yaml.round_trip_dump(output_array), file=out)

    out.close()

    sys.exit(error)
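The --output-fields handling above rewinds the already-parsed rule file and re-reads it with load_all to pluck selected top-level keys. The same pattern in isolation, with hypothetical field names and an in-memory stream standing in for the rule file:

import io
import yaml

f = io.StringIO("title: Demo rule\nlevel: high\ndetection: {}\n")
wanted = {"title", "level"}
f.seek(0)  # rewind, as the code above does after the first parse
output = {k: v
          for doc in yaml.load_all(f, Loader=yaml.FullLoader)
          for k, v in doc.items()
          if k in wanted}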